repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---|
sonnyhu/numpy
|
numpy/linalg/lapack_lite/clapack_scrub.py
|
76
|
7701
|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import sys, os
from io import StringIO
import re
from Plex import *
from Plex.Traditional import re as Re
class MyScanner(Scanner):
def __init__(self, info, name='<default>'):
Scanner.__init__(self, self.lexicon, info, name)
def begin(self, state_name):
# if self.state_name == '':
# print '<default>'
# else:
# print self.state_name
Scanner.begin(self, state_name)
def sep_seq(sequence, sep):
pat = Str(sequence[0])
for s in sequence[1:]:
pat += sep + Str(s)
return pat
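# Illustrative note (not part of the original scrubber): sep_seq builds a
# Plex pattern matching the given tokens in order, separated by `sep`.
# For example, sep_seq(['(', 'ftnlen', ')'], S) matches "(ftnlen)" even with
# whitespace between the tokens, such as "( ftnlen )".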
def runScanner(data, scanner_class, lexicon=None):
info = StringIO(data)
outfo = StringIO()
if lexicon is not None:
scanner = scanner_class(lexicon, info)
else:
scanner = scanner_class(info)
while True:
value, text = scanner.read()
if value is None:
break
elif value is IGNORE:
pass
else:
outfo.write(value)
return outfo.getvalue(), scanner
class LenSubsScanner(MyScanner):
"""Following clapack, we remove ftnlen arguments, which f2c puts after
a char * argument to hold the length of the passed string. This is just
a nuisance in C.
"""
def __init__(self, info, name='<ftnlen>'):
MyScanner.__init__(self, info, name)
self.paren_count = 0
def beginArgs(self, text):
if self.paren_count == 0:
self.begin('args')
self.paren_count += 1
return text
def endArgs(self, text):
self.paren_count -= 1
if self.paren_count == 0:
self.begin('')
return text
digits = Re('[0-9]+')
iofun = Re(r'\([^;]*;')
decl = Re(r'\([^)]*\)[,;'+'\n]')
any = Re('[.]*')
S = Re('[ \t\n]*')
cS = Str(',') + S
len_ = Re('[a-z][a-z0-9]*_len')
iofunctions = Str("s_cat", "s_copy", "s_stop", "s_cmp",
"i_len", "do_fio", "do_lio") + iofun
# Routines whose ftnlen arguments should not be scrubbed
keep_ftnlen = (Str('ilaenv_') | Str('s_rnge')) + Str('(')
lexicon = Lexicon([
(iofunctions, TEXT),
(keep_ftnlen, beginArgs),
State('args', [
(Str(')'), endArgs),
(Str('('), beginArgs),
(AnyChar, TEXT),
]),
(cS+Re(r'[1-9][0-9]*L'), IGNORE),
(cS+Str('ftnlen')+Opt(S+len_), IGNORE),
(cS+sep_seq(['(', 'ftnlen', ')'], S)+S+digits, IGNORE),
(Bol+Str('ftnlen ')+len_+Str(';\n'), IGNORE),
(cS+len_, TEXT),
(AnyChar, TEXT),
])
def scrubFtnlen(source):
return runScanner(source, LenSubsScanner)[0]
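# A hedged example of the intended effect (assuming the Plex module is
# available): the ftnlen arguments that f2c appends for string lengths are
# dropped from both calls and declarations, e.g. (illustrative input)
#     xerbla_("DGEMM", &info, (ftnlen)5);
# becomes
#     xerbla_("DGEMM", &info);
# and ", ftnlen" parameters disappear from extern declarations, while calls
# named in keep_ftnlen (ilaenv_, s_rnge) keep their arguments.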
def cleanSource(source):
# remove whitespace at end of lines
source = re.sub(r'[\t ]+\n', '\n', source)
# remove comments like .. Scalar Arguments ..
source = re.sub(r'(?m)^[\t ]*/\* *\.\. .*?\n', '', source)
# collapse blanks of more than two in-a-row to two
source = re.sub(r'\n\n\n\n+', r'\n\n\n', source)
return source
class LineQueue(object):
def __init__(self):
object.__init__(self)
self._queue = []
def add(self, line):
self._queue.append(line)
def clear(self):
self._queue = []
def flushTo(self, other_queue):
for line in self._queue:
other_queue.add(line)
self.clear()
def getValue(self):
q = LineQueue()
self.flushTo(q)
s = ''.join(q._queue)
self.clear()
return s
class CommentQueue(LineQueue):
def __init__(self):
LineQueue.__init__(self)
def add(self, line):
if line.strip() == '':
LineQueue.add(self, '\n')
else:
line = ' ' + line[2:-3].rstrip() + '\n'
LineQueue.add(self, line)
def flushTo(self, other_queue):
if len(self._queue) == 0:
pass
elif len(self._queue) == 1:
other_queue.add('/*' + self._queue[0][2:].rstrip() + ' */\n')
else:
other_queue.add('/*\n')
LineQueue.flushTo(self, other_queue)
other_queue.add('*/\n')
self.clear()
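# Illustrative behaviour: a single buffered comment line is re-emitted as a
# one-line "/* ... */" comment, while several consecutive comment lines are
# merged into one block comment ("/*" on its own line, one line per original
# comment, then "*/").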
# This really seems to be about 4x longer than it needs to be
def cleanComments(source):
lines = LineQueue()
comments = CommentQueue()
def isCommentLine(line):
return line.startswith('/*') and line.endswith('*/\n')
blanks = LineQueue()
def isBlank(line):
return line.strip() == ''
def SourceLines(line):
if isCommentLine(line):
comments.add(line)
return HaveCommentLines
else:
lines.add(line)
return SourceLines
def HaveCommentLines(line):
if isBlank(line):
blanks.add('\n')
return HaveBlankLines
elif isCommentLine(line):
comments.add(line)
return HaveCommentLines
else:
comments.flushTo(lines)
lines.add(line)
return SourceLines
def HaveBlankLines(line):
if isBlank(line):
blanks.add('\n')
return HaveBlankLines
elif isCommentLine(line):
blanks.flushTo(comments)
comments.add(line)
return HaveCommentLines
else:
comments.flushTo(lines)
blanks.flushTo(lines)
lines.add(line)
return SourceLines
state = SourceLines
for line in StringIO(source):
state = state(line)
comments.flushTo(lines)
return lines.getValue()
def removeHeader(source):
lines = LineQueue()
def LookingForHeader(line):
m = re.match(r'/\*[^\n]*-- translated', line)
if m:
return InHeader
else:
lines.add(line)
return LookingForHeader
def InHeader(line):
if line.startswith('*/'):
return OutOfHeader
else:
return InHeader
def OutOfHeader(line):
if line.startswith('#include "f2c.h"'):
pass
else:
lines.add(line)
return OutOfHeader
state = LookingForHeader
for line in StringIO(source):
state = state(line)
return lines.getValue()
def replaceDlamch(source):
"""Replace dlamch_ calls with appropiate macros"""
def repl(m):
s = m.group(1)
return dict(E='EPSILON', P='PRECISION', S='SAFEMINIMUM',
B='BASE')[s[0]]
source = re.sub(r'dlamch_\("(.*?)"\)', repl, source)
source = re.sub(r'^\s+extern.*? dlamch_.*?;$(?m)', '', source)
return source
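# For example (illustrative), given f2c output such as
#     eps = dlamch_("Epsilon");
# replaceDlamch rewrites the call to
#     eps = EPSILON;
# choosing the macro from the first letter of the argument (E/P/S/B), and
# also strips the matching "extern ... dlamch_ ...;" declarations.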
# do it
def scrubSource(source, nsteps=None, verbose=False):
steps = [
('scrubbing ftnlen', scrubFtnlen),
('remove header', removeHeader),
('clean source', cleanSource),
('clean comments', cleanComments),
('replace dlamch_() calls', replaceDlamch),
]
if nsteps is not None:
steps = steps[:nsteps]
for msg, step in steps:
if verbose:
print(msg)
source = step(source)
return source
if __name__ == '__main__':
filename = sys.argv[1]
outfilename = os.path.join(sys.argv[2], os.path.basename(filename))
fo = open(filename, 'r')
source = fo.read()
fo.close()
if len(sys.argv) > 3:
nsteps = int(sys.argv[3])
else:
nsteps = None
source = scrubSource(source, nsteps, verbose=True)
writefo = open(outfilename, 'w')
writefo.write(source)
writefo.close()
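# Usage sketch, following the argument handling above:
#     python clapack_scrub.py <input.c> <output_dir> [nsteps]
# reads <input.c>, applies the first nsteps scrubbing steps (all of them if
# omitted), and writes the result to <output_dir>/<basename of input.c>.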
|
bsd-3-clause
|
OpenPymeMx/OCB
|
addons/l10n_nl/__init__.py
|
424
|
1413
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Veritos - Jan Verlaan - www.veritos.nl
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company like Veritos.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
maxamillion/ansible-modules-core
|
cloud/openstack/os_floating_ip.py
|
12
|
9832
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# Author: Davide Guerri <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_floating_ip
version_added: "2.0"
short_description: Add/Remove floating IP from an instance
extends_documentation_fragment: openstack
description:
- Add or Remove a floating IP to an instance
options:
server:
description:
- The name or ID of the instance to which the IP address
should be assigned.
required: true
network:
description:
- The name or ID of a neutron external network or a nova pool name.
required: false
floating_ip_address:
description:
- A floating IP address to attach or to detach. Required only if I(state)
is absent. When I(state) is present, it can be used to specify an IP
address to attach.
required: false
reuse:
description:
- When I(state) is present, and I(floating_ip_address) is not present,
this parameter can be used to specify whether we should try to reuse
a floating IP address already allocated to the project.
required: false
default: false
fixed_address:
description:
- The fixed IP address of the server that the floating IP should be
attached to.
required: false
nat_destination:
description:
- The name or ID of the neutron private network holding the fixed IP
that the floating IP should be attached to.
required: false
default: None
aliases: ["fixed_network", "internal_network"]
version_added: "2.3"
wait:
description:
- When attaching a floating IP address, specify whether we should
wait for it to appear as attached.
required: false
default: false
timeout:
description:
- Time to wait for an IP address to appear as attached. See wait.
required: false
default: 60
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
required: false
default: present
purge:
description:
- When I(state) is absent, indicates whether or not to delete the floating
IP completely, or only detach it from the server. Default is to detach only.
required: false
default: false
version_added: "2.1"
requirements: ["shade"]
'''
EXAMPLES = '''
# Assign a floating IP to the first interface of `cattle001` from an existing
# external network or nova pool. A new floating IP from the first available
# external network is allocated to the project.
- os_floating_ip:
cloud: dguerri
server: cattle001
# Assign a new floating IP to the instance fixed IP `192.0.2.3` of
# `cattle001`. If a free floating IP is already allocated to the project, it is
# reused; if not, a new one is created.
- os_floating_ip:
cloud: dguerri
state: present
reuse: yes
server: cattle001
network: ext_net
fixed_address: 192.0.2.3
wait: true
timeout: 180
# Assign a new floating IP from the network `ext_net` to the instance fixed
# IP in network `private_net` of `cattle001`.
- os_floating_ip:
cloud: dguerri
state: present
server: cattle001
network: ext_net
nat_destination: private_net
wait: true
timeout: 180
# Detach a floating IP address from a server
- os_floating_ip:
cloud: dguerri
state: absent
floating_ip_address: 203.0.113.2
server: cattle001
'''
def _get_floating_ip(cloud, floating_ip_address):
f_ips = cloud.search_floating_ips(
filters={'floating_ip_address': floating_ip_address})
if not f_ips:
return None
return f_ips[0]
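# Illustrative helper behaviour: returns the shade floating-ip record whose
# address matches exactly, or None if no such IP is allocated, e.g.
#     f_ip = _get_floating_ip(cloud, '203.0.113.2')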
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
network=dict(required=False, default=None),
floating_ip_address=dict(required=False, default=None),
reuse=dict(required=False, type='bool', default=False),
fixed_address=dict(required=False, default=None),
nat_destination=dict(required=False, default=None,
aliases=['fixed_network', 'internal_network']),
wait=dict(required=False, type='bool', default=False),
timeout=dict(required=False, type='int', default=60),
purge=dict(required=False, type='bool', default=False),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['nat_destination'] and
StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
module.fail_json(msg="To utilize nat_destination, the installed version of"
"the shade library MUST be >= 1.8.0")
server_name_or_id = module.params['server']
state = module.params['state']
network = module.params['network']
floating_ip_address = module.params['floating_ip_address']
reuse = module.params['reuse']
fixed_address = module.params['fixed_address']
nat_destination = module.params['nat_destination']
wait = module.params['wait']
timeout = module.params['timeout']
purge = module.params['purge']
cloud = shade.openstack_cloud(**module.params)
try:
server = cloud.get_server(server_name_or_id)
if server is None:
module.fail_json(
msg="server {0} not found".format(server_name_or_id))
if state == 'present':
# If f_ip already assigned to server, check that it matches
# requirements.
public_ip = cloud.get_server_public_ip(server)
f_ip = _get_floating_ip(cloud, public_ip) if public_ip else public_ip
if f_ip:
if network:
network_id = cloud.get_network(name_or_id=network)["id"]
else:
network_id = None
if all([(fixed_address and f_ip.fixed_ip_address == fixed_address) or
(nat_destination and f_ip.internal_network == nat_destination),
network, f_ip.network != network_id]):
# Current state definitely conflicts with requirements
module.fail_json(msg="server {server} already has a "
"floating-ip on requested "
"interface but it doesn't match "
"requested network {network: {fip}"
.format(server=server_name_or_id,
network=network,
fip=remove_values(f_ip,
module.no_log_values)))
if not network or f_ip.network == network_id:
# Requirements are met
module.exit_json(changed=False, floating_ip=f_ip)
# Requirements are vague enough to ignore existing f_ip and try
# to attach a new f_ip to the server.
server = cloud.add_ips_to_server(
server=server, ips=floating_ip_address, ip_pool=network,
reuse=reuse, fixed_address=fixed_address, wait=wait,
timeout=timeout, nat_destination=nat_destination)
fip_address = cloud.get_server_public_ip(server)
# Update the floating IP status
f_ip = _get_floating_ip(cloud, fip_address)
module.exit_json(changed=True, floating_ip=f_ip)
elif state == 'absent':
if floating_ip_address is None:
if not server_name_or_id:
module.fail_json(msg="either server or floating_ip_address are required")
server = cloud.get_server(server_name_or_id)
floating_ip_address = cloud.get_server_public_ip(server)
f_ip = _get_floating_ip(cloud, floating_ip_address)
if not f_ip:
# Nothing to detach
module.exit_json(changed=False)
changed = False
if f_ip["fixed_ip_address"]:
cloud.detach_ip_from_server(
server_id=server['id'], floating_ip_id=f_ip['id'])
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
changed = True
if purge:
cloud.delete_floating_ip(f_ip['id'])
module.exit_json(changed=True)
module.exit_json(changed=changed, floating_ip=f_ip)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
gpl-3.0
|
dgzurita/odoo
|
addons/account/project/wizard/account_analytic_chart.py
|
362
|
2100
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_chart(osv.osv_memory):
_name = 'account.analytic.chart'
_description = 'Account Analytic Chart'
_columns = {
'from_date': fields.date('From'),
'to_date': fields.date('To'),
}
def analytic_account_chart_open_window(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result_context = {}
if context is None:
context = {}
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_analytic_account_tree2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
data = self.read(cr, uid, ids, [])[0]
if data['from_date']:
result_context.update({'from_date': data['from_date']})
if data['to_date']:
result_context.update({'to_date': data['to_date']})
result['context'] = str(result_context)
return result
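# Illustrative result (assuming both wizard dates are set): the returned
# window action is "action_account_analytic_account_tree2" with its context
# narrowed to the chosen period, so result['context'] might read
#     "{'from_date': '2014-01-01', 'to_date': '2014-12-31'}"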
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
RealTimeWeb/wikisite
|
MoinMoin/support/xappy/fieldactions.py
|
4
|
17193
|
#!/usr/bin/env python
#
# Copyright (C) 2007 Lemur Consulting Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
r"""fieldactions.py: Definitions and implementations of field actions.
"""
__docformat__ = "restructuredtext en"
import _checkxapian
import errors
import marshall
from replaylog import log
import xapian
import parsedate
def _act_store_content(fieldname, doc, value, context):
"""Perform the STORE_CONTENT action.
"""
try:
fielddata = doc.data[fieldname]
except KeyError:
fielddata = []
doc.data[fieldname] = fielddata
fielddata.append(value)
def _act_index_exact(fieldname, doc, value, context):
"""Perform the INDEX_EXACT action.
"""
doc.add_term(fieldname, value, 0)
def _act_tag(fieldname, doc, value, context):
"""Perform the TAG action.
"""
doc.add_term(fieldname, value.lower(), 0)
def _act_facet(fieldname, doc, value, context, type=None):
"""Perform the FACET action.
"""
if type is None or type == 'string':
value = value.lower()
doc.add_term(fieldname, value, 0)
serialiser = log(xapian.StringListSerialiser,
doc.get_value(fieldname, 'facet'))
serialiser.append(value)
doc.add_value(fieldname, serialiser.get(), 'facet')
else:
marshaller = SortableMarshaller()
fn = marshaller.get_marshall_function(fieldname, type)
doc.add_value(fieldname, fn(fieldname, value), 'facet')
def _act_index_freetext(fieldname, doc, value, context, weight=1,
language=None, stop=None, spell=False,
nopos=False,
allow_field_specific=True,
search_by_default=True):
"""Perform the INDEX_FREETEXT action.
"""
termgen = log(xapian.TermGenerator)
if language is not None:
termgen.set_stemmer(log(xapian.Stem, language))
if stop is not None:
stopper = log(xapian.SimpleStopper)
for term in stop:
stopper.add(term)
termgen.set_stopper(stopper)
if spell:
termgen.set_database(context.index)
termgen.set_flags(termgen.FLAG_SPELLING)
termgen.set_document(doc._doc)
if search_by_default:
termgen.set_termpos(context.current_position)
# Store a copy of the field without a prefix, for non-field-specific
# searches.
if nopos:
termgen.index_text_without_positions(value, weight, '')
else:
termgen.index_text(value, weight, '')
if allow_field_specific:
# Store a second copy of the term with a prefix, for field-specific
# searches.
prefix = doc._fieldmappings.get_prefix(fieldname)
if len(prefix) != 0:
termgen.set_termpos(context.current_position)
if nopos:
termgen.index_text_without_positions(value, weight, prefix)
else:
termgen.index_text(value, weight, prefix)
# Add a gap between each field instance, so that phrase searches don't
# match across instances.
termgen.increase_termpos(10)
context.current_position = termgen.get_termpos()
class SortableMarshaller(object):
"""Implementation of marshalling for sortable values.
"""
def __init__(self, indexing=True):
if indexing:
self._err = errors.IndexerError
else:
self._err = errors.SearchError
def marshall_string(self, fieldname, value):
"""Marshall a value for sorting in lexicograpical order.
This returns the input as the output, since strings already sort in
lexicographical order.
"""
return value
def marshall_float(self, fieldname, value):
"""Marshall a value for sorting as a floating point value.
"""
# convert the value to a float
try:
value = float(value)
except ValueError:
raise self._err("Value supplied to field %r must be a "
"valid floating point number: was %r" %
(fieldname, value))
return marshall.float_to_string(value)
def marshall_date(self, fieldname, value):
"""Marshall a value for sorting as a date.
"""
try:
value = parsedate.date_from_string(value)
except ValueError, e:
raise self._err("Value supplied to field %r must be a "
"valid date: was %r: error is '%s'" %
(fieldname, value, str(e)))
return marshall.date_to_string(value)
def get_marshall_function(self, fieldname, sorttype):
"""Get a function used to marshall values of a given sorttype.
"""
try:
return {
None: self.marshall_string,
'string': self.marshall_string,
'float': self.marshall_float,
'date': self.marshall_date,
}[sorttype]
except KeyError:
raise self._err("Unknown sort type %r for field %r" %
(sorttype, fieldname))
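# For example (illustrative names):
#     fn = SortableMarshaller().get_marshall_function('price', 'float')
#     key = fn('price', '3.14')   # a string that sorts in numeric order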
def _act_sort_and_collapse(fieldname, doc, value, context, type=None):
"""Perform the SORTABLE action.
"""
marshaller = SortableMarshaller()
fn = marshaller.get_marshall_function(fieldname, type)
value = fn(fieldname, value)
doc.add_value(fieldname, value, 'collsort')
class ActionContext(object):
"""The context in which an action is performed.
This is just used to pass term generators, word positions, and the like
around.
"""
def __init__(self, index):
self.current_language = None
self.current_position = 0
self.index = index
class FieldActions(object):
"""An object describing the actions to be performed on a field.
The supported actions are:
- `STORE_CONTENT`: store the unprocessed content of the field in the search
engine database. All fields which need to be displayed or used when
displaying the search results need to be given this action.
- `INDEX_EXACT`: index the exact content of the field as a single search
term. Fields whose contents need to be searchable as an "exact match"
need to be given this action.
- `INDEX_FREETEXT`: index the content of this field as text. The content
will be split into terms, allowing free text searching of the field. Several
optional parameters may be supplied:
- 'weight' is a multiplier to apply to the importance of the field. This
must be an integer, and the default value is 1.
- 'language' is the language to use when processing the field. This can
be expressed as an ISO 2-letter language code. The supported languages
are those supported by the xapian core in use.
- 'stop' is an iterable of stopwords to filter out of the generated
terms. Note that due to Xapian design, only non-positional terms are
affected, so this is of limited use.
- 'spell' is a boolean flag - if true, the contents of the field will be
used for spelling correction.
- 'nopos' is a boolean flag - if true, positional information is not
stored.
- 'allow_field_specific' is a boolean flag - if False, prevents terms with the field
prefix being generated. This means that searches specific to this
field will not work, and thus should only be used when only non-field
specific searches are desired. Defaults to True.
- 'search_by_default' is a boolean flag - if False, the field will not be
searched by non-field specific searches. If True, or omitted, the
field will be included in searches for non field-specific searches.
- `SORTABLE`: index the content of the field such that it can be used to
sort result sets. It also allows result sets to be restricted to those
documents with a field values in a given range. One optional parameter
may be supplied:
- 'type' is a value indicating how to sort the field. It has several
possible values:
- 'string' - sort in lexicographic (ie, alphabetical) order.
This is the default, used if no type is set.
- 'float' - treat the values as (decimal representations of) floating
point numbers, and sort in numerical order. The values in the field
must be valid floating point numbers (according to Python's float()
function).
- 'date' - sort in date order. The values must be valid dates (either
Python datetime.date objects, or ISO 8601 format (ie, YYYYMMDD or
YYYY-MM-DD)).
- `COLLAPSE`: index the content of the field such that it can be used to
"collapse" result sets, such that only the highest result with each value
of the field will be returned.
- `TAG`: the field contains tags; these are strings, which will be matched
in a case insensitive way, but otherwise must be exact matches. Tag
fields can be searched for by making an explicit query (ie, using
query_field(), but not with query_parse()). A list of the most frequent
tags in a result set can also be accessed easily.
- `FACET`: the field represents a classification facet; these are strings
which will be matched exactly, but a list of all the facets present in
the result set can also be accessed easily - in addition, a suitable
subset of the facets, and a selection of the facet values, present in the
result set can be calculated. One optional parameter may be supplied:
- 'type' is a value indicating the type of facet contained in the field:
- 'string' - the facet values are exact binary strings.
- 'float' - the facet values are floating point numbers.
"""
# See the class docstring for the meanings of the following constants.
STORE_CONTENT = 1
INDEX_EXACT = 2
INDEX_FREETEXT = 3
SORTABLE = 4
COLLAPSE = 5
TAG = 6
FACET = 7
# Sorting and collapsing store the data in a value, but the format depends
# on the sort type. Easiest way to implement is to treat them as the same
# action.
SORT_AND_COLLAPSE = -1
_unsupported_actions = []
if 'tags' in _checkxapian.missing_features:
_unsupported_actions.append(TAG)
if 'facets' in _checkxapian.missing_features:
_unsupported_actions.append(FACET)
def __init__(self, fieldname):
# Dictionary of actions, keyed by type.
self._actions = {}
self._fieldname = fieldname
def add(self, field_mappings, action, **kwargs):
"""Add an action to perform on a field.
"""
if action in self._unsupported_actions:
raise errors.IndexerError("Action unsupported with this release of xapian")
if action not in (FieldActions.STORE_CONTENT,
FieldActions.INDEX_EXACT,
FieldActions.INDEX_FREETEXT,
FieldActions.SORTABLE,
FieldActions.COLLAPSE,
FieldActions.TAG,
FieldActions.FACET,
):
raise errors.IndexerError("Unknown field action: %r" % action)
info = self._action_info[action]
# Check parameter names
for key in kwargs.keys():
if key not in info[1]:
raise errors.IndexerError("Unknown parameter name for action %r: %r" % (info[0], key))
# Fields cannot be indexed both with "EXACT" and "FREETEXT": whilst we
# could implement this, the query parser wouldn't know what to do with
# searches.
if action == FieldActions.INDEX_EXACT:
if FieldActions.INDEX_FREETEXT in self._actions:
raise errors.IndexerError("Field %r is already marked for indexing "
"as free text: cannot mark for indexing "
"as exact text as well" % self._fieldname)
if action == FieldActions.INDEX_FREETEXT:
if FieldActions.INDEX_EXACT in self._actions:
raise errors.IndexerError("Field %r is already marked for indexing "
"as exact text: cannot mark for indexing "
"as free text as well" % self._fieldname)
# Fields cannot be indexed as more than one type for "SORTABLE": to
# implement this, we'd need to use a different prefix for each sortable
# type, but even then the search end wouldn't know what to sort on when
# searching. Also, if they're indexed as "COLLAPSE", the value must be
# stored in the right format for the type "SORTABLE".
if action == FieldActions.SORTABLE or action == FieldActions.COLLAPSE:
if action == FieldActions.COLLAPSE:
sorttype = None
else:
try:
sorttype = kwargs['type']
except KeyError:
sorttype = 'string'
kwargs['type'] = sorttype
action = FieldActions.SORT_AND_COLLAPSE
try:
oldsortactions = self._actions[FieldActions.SORT_AND_COLLAPSE]
except KeyError:
oldsortactions = ()
if len(oldsortactions) > 0:
for oldsortaction in oldsortactions:
oldsorttype = oldsortaction['type']
if sorttype == oldsorttype or oldsorttype is None:
# Use new type
self._actions[action] = []
elif sorttype is None:
# Use old type
return
else:
raise errors.IndexerError("Field %r is already marked for "
"sorting, with a different "
"sort type" % self._fieldname)
if 'prefix' in info[3]:
field_mappings.add_prefix(self._fieldname)
if 'slot' in info[3]:
purposes = info[3]['slot']
if isinstance(purposes, basestring):
field_mappings.add_slot(self._fieldname, purposes)
else:
slotnum = None
for purpose in purposes:
slotnum = field_mappings.get_slot(self._fieldname, purpose)
if slotnum is not None:
break
for purpose in purposes:
field_mappings.add_slot(self._fieldname, purpose, slotnum=slotnum)
# Make an entry for the action
if action not in self._actions:
self._actions[action] = []
# Check for repetitions of actions
for old_action in self._actions[action]:
if old_action == kwargs:
return
# Append the action to the list of actions
self._actions[action].append(kwargs)
def perform(self, doc, value, context):
"""Perform the actions on the field.
- `doc` is a ProcessedDocument to store the result of the actions in.
- `value` is a string holding the value of the field.
- `context` is an ActionContext object used to keep state in.
"""
for type, actionlist in self._actions.iteritems():
info = self._action_info[type]
for kwargs in actionlist:
info[2](self._fieldname, doc, value, context, **kwargs)
_action_info = {
STORE_CONTENT: ('STORE_CONTENT', (), _act_store_content, {}, ),
INDEX_EXACT: ('INDEX_EXACT', (), _act_index_exact, {'prefix': True}, ),
INDEX_FREETEXT: ('INDEX_FREETEXT', ('weight', 'language', 'stop', 'spell', 'nopos', 'allow_field_specific', 'search_by_default', ),
_act_index_freetext, {'prefix': True, }, ),
SORTABLE: ('SORTABLE', ('type', ), None, {'slot': 'collsort',}, ),
COLLAPSE: ('COLLAPSE', (), None, {'slot': 'collsort',}, ),
TAG: ('TAG', (), _act_tag, {'prefix': True,}, ),
FACET: ('FACET', ('type', ), _act_facet, {'prefix': True, 'slot': 'facet',}, ),
SORT_AND_COLLAPSE: ('SORT_AND_COLLAPSE', ('type', ), _act_sort_and_collapse, {'slot': 'collsort',}, ),
}
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
apache-2.0
|
samdoran/ansible
|
lib/ansible/module_utils/json_utils.py
|
62
|
3352
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
try:
import json
except ImportError:
import simplejson as json
# NB: a copy of this function exists in ../../modules/core/async_wrapper.py. Ensure any
# changes are propagated there.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcgetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before the first line-starting occurrence of '{' or '[',
and filters all trailing lines after the matching close character (working from
the bottom of the output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
for line in trailing_junk:
if line.strip():
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
break
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
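# Illustrative example (hypothetical input): given module output polluted by
# an MOTD banner and trailing shell noise,
#     _filter_non_json_lines('Welcome!\n{"changed": false}\nlogout')
# returns ('{"changed": false}', warnings), where the warnings list notes the
# trailing "logout" junk; leading lines before the '{' are dropped silently.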
|
gpl-3.0
|
smalls257/VRvisu
|
Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/subprocess.py
|
32
|
54425
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attributes of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
check_output(*popenargs, **kwargs):
Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
output = check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() and check_output() will raise CalledProcessError, if the
called process returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen("cmd", mode='r', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen("cmd", mode='w', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4("cmd", mode,
bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
On Unix, os.popen2, os.popen3 and os.popen4 also accept a sequence as
the command to execute, in which case arguments will be passed
directly to the program without shell intervention. This usage can be
replaced as follows:
(child_stdin, child_stdout) = os.popen2(["/bin/ls", "-l"], mode,
bufsize)
==>
p = Popen(["/bin/ls", "-l"], bufsize=bufsize, stdin=PIPE, stdout=PIPE)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
Return code handling translates as follows:
pipe = os.popen("cmd", 'w')
...
rc = pipe.close()
if rc is not None and rc % 256:
print "There were some errors"
==>
process = Popen("cmd", 'w', shell=True, stdin=PIPE)
...
process.stdin.close()
if process.wait() != 0:
print "There were some errors"
Replacing popen2.*
------------------
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
On Unix, popen2 also accepts a sequence as the command to execute, in
which case arguments will be passed directly to the program without
shell intervention. This usage can be replaced as follows:
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize,
mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
popen2.Popen3 and popen2.Popen4 basically work as subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all file descriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
import signal
import errno
# Exception classes used by this module.
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
import threading
import msvcrt
import _subprocess
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
_has_poll = hasattr(select, 'poll')
import fcntl
import pickle
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call",
"check_output", "CalledProcessError"]
if mswindows:
from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxint)
if res is not None and res >= 0:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except OSError, e:
if e.errno == errno.EINTR:
continue
raise
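# Sketch of intended use (mirroring the POSIX code paths elsewhere in this
# module): wrap a blocking syscall so that EINTR caused by a signal handler
# is retried transparently, e.g.
#     chunk = _eintr_retry_call(os.read, fd, 1024)
# Any OSError other than EINTR propagates unchanged.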
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = Popen(stdout=PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
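# Illustrative doctest-style checks of the quoting rules above:
#     >>> list2cmdline(['a b c', 'd', 'e'])
#     '"a b c" d e'
#     >>> list2cmdline(['ab"c', '\\', 'd'])
#     'ab\\"c \\ d'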
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds and (stdin is not None or stdout is not None or
stderr is not None):
raise ValueError("close_fds is not supported on Windows "
"platforms if you redirect stdin/stdout/stderr")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if mswindows:
if p2cwrite is not None:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread is not None:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread is not None:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite is not None:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread is not None:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread is not None:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, _maxint=sys.maxint, _active=_active):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
self.stdout.close()
elif self.stderr:
stderr = self.stderr.read()
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
                    # stability of your system. The cost is that Ctrl+C
                    # won't kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
            except pywintypes.error as e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
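        # select() on Windows works only on sockets, not pipe handles, so
        # each output pipe is drained by its own daemon thread; this also
        # avoids deadlock when the child fills one pipe while we block on
        # another.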
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
_subprocess.TerminateProcess(self._handle, 1)
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd, cloexec=True):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
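                # FD_CLOEXEC is 1 on every platform we know of; fall back
                # to the literal when the constant is missing.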
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def _close_fds(self, but):
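            # Close every fd in [3, MAXFD) except 'but' (the error-report
            # pipe at the call site), using os.closerange() when available
            # for speed.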
if hasattr(os, 'closerange'):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
else:
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
            # For transferring possible exec failure from child to parent.
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
try:
try:
self._set_cloexec_flag(errpipe_write)
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite is not None:
os.close(p2cwrite)
if c2pread is not None:
os.close(c2pread)
if errread is not None:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
def _dup2(a, b):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if a == b:
self._set_cloexec_flag(a, False)
elif a is not None:
os.dup2(a, b)
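                            # Wire up the child's standard fds:
                            # 0 = stdin, 1 = stdout, 2 = stderr.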
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
closed = { None }
for fd in [p2cread, c2pwrite, errwrite]:
if fd not in closed and fd > 2:
os.close(fd)
closed.add(fd)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
if p2cread is not None and p2cwrite is not None:
os.close(p2cread)
if c2pwrite is not None and c2pread is not None:
os.close(c2pwrite)
if errwrite is not None and errread is not None:
os.close(errwrite)
                # Wait for exec to fail or succeed; possibly raising an
                # exception (exception data is limited to 1 MB).
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data != "":
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
child_exception = pickle.loads(data)
for fd in (p2cwrite, c2pread, errread):
if fd is not None:
os.close(fd)
raise child_exception
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
            # refer to anything outside of its local scope.
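            # Convention: a negative returncode of -N means the child was
            # killed by signal N; otherwise it is the plain exit status.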
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
sts = 0
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input)
else:
stdout, stderr = self._communicate_with_select(input)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _communicate_with_poll(self, input):
stdout = None # Return
stderr = None # Return
fd2file = {}
fd2output = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
fd2file[fd].close()
fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
fd2output[self.stdout.fileno()] = stdout = []
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
fd2output[self.stderr.fileno()] = stderr = []
input_offset = 0
while fd2file:
try:
ready = poller.poll()
                except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if input_offset >= len(input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input):
read_set = []
write_set = []
stdout = None # Return
stderr = None # Return
if self.stdin and input:
write_set.append(self.stdin)
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
try:
rlist, wlist, xlist = select.select(read_set, write_set, [])
                except select.error as e:
if e.args[0] == errno.EINTR:
continue
raise
if self.stdin in wlist:
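                    # As in the poll() path, chunk writes to _PIPE_BUF
                    # bytes so a single write should not block.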
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
bytes_written = os.write(self.stdin.fileno(), chunk)
except OSError as e:
if e.errno == errno.EPIPE:
self.stdin.close()
write_set.remove(self.stdin)
else:
raise
else:
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
def _demo_posix():
#
# Example 1: Simple redirection: Get process list
#
plist = Popen(["ps"], stdout=PIPE).communicate()[0]
print "Process list:"
print plist
#
# Example 2: Change uid before executing child
#
if os.getuid() == 0:
p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
p.wait()
#
# Example 3: Connecting several subprocesses
#
print "Looking for 'hda'..."
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 4: Catch execution error
#
print
print "Trying a weird file..."
try:
print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError as e:
if e.errno == errno.ENOENT:
print "The file didn't exist. I thought so..."
print "Child traceback:"
print e.child_traceback
else:
print "Error", e.errno
else:
print >>sys.stderr, "Gosh. No error."
def _demo_windows():
#
# Example 1: Connecting several subprocesses
#
print "Looking for 'PROMPT' in set output..."
p1 = Popen("set", stdout=PIPE, shell=True)
p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
print repr(p2.communicate()[0])
#
# Example 2: Simple execution of program
#
print "Executing calc..."
p = Popen("calc")
p.wait()
if __name__ == "__main__":
if mswindows:
_demo_windows()
else:
_demo_posix()
|
gpl-3.0
|
kfwang/Glance-OVA-OVF
|
glance/image_cache/client.py
|
10
|
4184
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_serialization import jsonutils as json
from glance.common import client as base_client
from glance.common import exception
from glance import i18n
_ = i18n._
class CacheClient(base_client.BaseClient):
DEFAULT_PORT = 9292
DEFAULT_DOC_ROOT = '/v1'
def delete_cached_image(self, image_id):
"""
Delete a specified image from the cache
"""
self.do_request("DELETE", "/cached_images/%s" % image_id)
return True
def get_cached_images(self, **kwargs):
"""
Returns a list of images stored in the image cache.
"""
res = self.do_request("GET", "/cached_images")
data = json.loads(res.read())['cached_images']
return data
def get_queued_images(self, **kwargs):
"""
Returns a list of images queued for caching
"""
res = self.do_request("GET", "/queued_images")
data = json.loads(res.read())['queued_images']
return data
def delete_all_cached_images(self):
"""
Delete all cached images
"""
res = self.do_request("DELETE", "/cached_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def queue_image_for_caching(self, image_id):
"""
Queue an image for prefetching into cache
"""
self.do_request("PUT", "/queued_images/%s" % image_id)
return True
def delete_queued_image(self, image_id):
"""
Delete a specified image from the cache queue
"""
self.do_request("DELETE", "/queued_images/%s" % image_id)
return True
def delete_all_queued_images(self):
"""
Delete all queued images
"""
res = self.do_request("DELETE", "/queued_images")
data = json.loads(res.read())
num_deleted = data['num_deleted']
return num_deleted
def get_client(host, port=None, timeout=None, use_ssl=False, username=None,
password=None, tenant=None,
auth_url=None, auth_strategy=None,
auth_token=None, region=None,
is_silent_upload=False, insecure=False):
"""
    Returns a new Glance client object based on common kwargs.
    If an option isn't specified, falls back to common environment
    variable defaults.
"""
if auth_url or os.getenv('OS_AUTH_URL'):
force_strategy = 'keystone'
else:
force_strategy = None
creds = {
'username': username or
os.getenv('OS_AUTH_USER', os.getenv('OS_USERNAME')),
'password': password or
os.getenv('OS_AUTH_KEY', os.getenv('OS_PASSWORD')),
'tenant': tenant or
os.getenv('OS_AUTH_TENANT', os.getenv('OS_TENANT_NAME')),
'auth_url': auth_url or
os.getenv('OS_AUTH_URL'),
'strategy': force_strategy or
auth_strategy or
os.getenv('OS_AUTH_STRATEGY', 'noauth'),
'region': region or
os.getenv('OS_REGION_NAME'),
}
if creds['strategy'] == 'keystone' and not creds['auth_url']:
msg = _("--os_auth_url option or OS_AUTH_URL environment variable "
"required when keystone authentication strategy is enabled\n")
raise exception.ClientConfigurationError(msg)
return CacheClient(
host=host,
port=port,
timeout=timeout,
use_ssl=use_ssl,
auth_token=auth_token or
os.getenv('OS_TOKEN'),
creds=creds,
insecure=insecure)
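
# Illustrative usage sketch -- the host, port and image id below are
# assumptions for the example, not values defined by this module:
#
#   client = get_client('127.0.0.1', port=9292)
#   client.queue_image_for_caching('11111111-2222-3333-4444-555555555555')
#   print(client.get_queued_images())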
|
apache-2.0
|
sursum/buckanjaren
|
buckanjaren/lib/python3.5/site-packages/psycopg2/tests/test_connection.py
|
8
|
57273
|
#!/usr/bin/env python
# test_connection.py - unit test for connection attributes
#
# Copyright (C) 2008-2011 James Henstridge <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import re
import os
import sys
import time
import threading
import subprocess as sp
from operator import attrgetter
import psycopg2
import psycopg2.errorcodes
from psycopg2 import extensions as ext
from .testutils import (
script_to_py3, unittest, decorate_all_tests, skip_if_no_superuser,
skip_before_postgres, skip_after_postgres, skip_before_libpq,
ConnectingTestCase, skip_if_tpc_disabled, skip_if_windows, slow)
from .testconfig import dsn, dbname
class ConnectionTests(ConnectingTestCase):
def test_closed_attribute(self):
conn = self.conn
self.assertEqual(conn.closed, False)
conn.close()
self.assertEqual(conn.closed, True)
def test_close_idempotent(self):
conn = self.conn
conn.close()
conn.close()
self.assertTrue(conn.closed)
def test_cursor_closed_attribute(self):
conn = self.conn
curs = conn.cursor()
self.assertEqual(curs.closed, False)
curs.close()
self.assertEqual(curs.closed, True)
# Closing the connection closes the cursor:
curs = conn.cursor()
conn.close()
self.assertEqual(curs.closed, True)
@skip_before_postgres(8, 4)
@skip_if_no_superuser
@skip_if_windows
def test_cleanup_on_badconn_close(self):
# ticket #148
conn = self.conn
cur = conn.cursor()
self.assertRaises(psycopg2.OperationalError,
cur.execute, "select pg_terminate_backend(pg_backend_pid())")
self.assertEqual(conn.closed, 2)
conn.close()
self.assertEqual(conn.closed, 1)
def test_reset(self):
conn = self.conn
# switch session characteristics
conn.autocommit = True
conn.isolation_level = 'serializable'
conn.readonly = True
if self.conn.server_version >= 90100:
conn.deferrable = False
self.assertTrue(conn.autocommit)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertTrue(conn.readonly is True)
if self.conn.server_version >= 90100:
self.assertTrue(conn.deferrable is False)
conn.reset()
# now the session characteristics should be reverted
self.assertTrue(not conn.autocommit)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT)
self.assertTrue(conn.readonly is None)
if self.conn.server_version >= 90100:
self.assertTrue(conn.deferrable is None)
def test_notices(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table chatty (id serial primary key);")
self.assertEqual("CREATE TABLE", cur.statusmessage)
self.assertTrue(conn.notices)
def test_notices_consistent_order(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("""
create temp table table1 (id serial);
create temp table table2 (id serial);
""")
cur.execute("""
create temp table table3 (id serial);
create temp table table4 (id serial);
""")
self.assertEqual(4, len(conn.notices))
self.assertTrue('table1' in conn.notices[0])
self.assertTrue('table2' in conn.notices[1])
self.assertTrue('table3' in conn.notices[2])
self.assertTrue('table4' in conn.notices[3])
@slow
def test_notices_limited(self):
conn = self.conn
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
for i in range(0, 100, 10):
sql = " ".join(["create temp table table%d (id serial);" % j
for j in range(i, i + 10)])
cur.execute(sql)
self.assertEqual(50, len(conn.notices))
self.assertTrue('table99' in conn.notices[-1], conn.notices[-1])
@slow
def test_notices_deque(self):
from collections import deque
conn = self.conn
self.conn.notices = deque()
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("""
create temp table table1 (id serial);
create temp table table2 (id serial);
""")
cur.execute("""
create temp table table3 (id serial);
create temp table table4 (id serial);""")
self.assertEqual(len(conn.notices), 4)
self.assertTrue('table1' in conn.notices.popleft())
self.assertTrue('table2' in conn.notices.popleft())
self.assertTrue('table3' in conn.notices.popleft())
self.assertTrue('table4' in conn.notices.popleft())
self.assertEqual(len(conn.notices), 0)
# not limited, but no error
for i in range(0, 100, 10):
sql = " ".join(["create temp table table2_%d (id serial);" % j
for j in range(i, i + 10)])
cur.execute(sql)
self.assertEqual(len([n for n in conn.notices if 'CREATE TABLE' in n]),
100)
def test_notices_noappend(self):
conn = self.conn
        self.conn.notices = None  # appending will fail; the error is swallowed
cur = conn.cursor()
if self.conn.server_version >= 90300:
cur.execute("set client_min_messages=debug1")
cur.execute("create temp table table1 (id serial);")
self.assertEqual(self.conn.notices, None)
def test_server_version(self):
self.assertTrue(self.conn.server_version)
def test_protocol_version(self):
self.assertTrue(self.conn.protocol_version in (2, 3),
self.conn.protocol_version)
def test_tpc_unsupported(self):
cnn = self.conn
if cnn.server_version >= 80100:
return self.skipTest("tpc is supported")
self.assertRaises(psycopg2.NotSupportedError,
cnn.xid, 42, "foo", "bar")
@slow
@skip_before_postgres(8, 2)
def test_concurrent_execution(self):
def slave():
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select pg_sleep(4)")
cur.close()
cnn.close()
t1 = threading.Thread(target=slave)
t2 = threading.Thread(target=slave)
t0 = time.time()
t1.start()
t2.start()
t1.join()
t2.join()
self.assertTrue(time.time() - t0 < 7,
"something broken in concurrency")
def test_encoding_name(self):
self.conn.set_client_encoding("EUC_JP")
# conn.encoding is 'EUCJP' now.
cur = self.conn.cursor()
ext.register_type(ext.UNICODE, cur)
cur.execute("select 'foo'::text;")
self.assertEqual(cur.fetchone()[0], 'foo')
def test_connect_nonnormal_envvar(self):
# We must perform encoding normalization at connection time
self.conn.close()
oldenc = os.environ.get('PGCLIENTENCODING')
        os.environ['PGCLIENTENCODING'] = 'utf-8'  # non-canonical spelling
try:
self.conn = self.connect()
finally:
if oldenc is not None:
os.environ['PGCLIENTENCODING'] = oldenc
else:
del os.environ['PGCLIENTENCODING']
def test_weakref(self):
from weakref import ref
import gc
conn = psycopg2.connect(dsn)
w = ref(conn)
conn.close()
del conn
gc.collect()
self.assertTrue(w() is None)
@slow
def test_commit_concurrency(self):
        # The problem is the one reported in ticket #103. Because of a bad
        # status check, we would commit even when a commit was already on
        # its way. We can detect this condition from the warnings.
conn = self.conn
notices = []
stop = []
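        # A mutable list doubles as a cheap thread-shared stop flag:
        # appending to it makes "not stop" false and ends the committer loop.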
def committer():
while not stop:
conn.commit()
while conn.notices:
notices.append((2, conn.notices.pop()))
cur = conn.cursor()
t1 = threading.Thread(target=committer)
t1.start()
i = 1
for i in range(1000):
cur.execute("select %s;", (i,))
conn.commit()
while conn.notices:
notices.append((1, conn.notices.pop()))
# Stop the committer thread
stop.append(True)
self.assertTrue(not notices, "%d notices raised" % len(notices))
def test_connect_cursor_factory(self):
import psycopg2.extras
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
def test_cursor_factory(self):
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
self.conn.cursor_factory = psycopg2.extras.DictCursor
self.assertEqual(self.conn.cursor_factory, psycopg2.extras.DictCursor)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertEqual(cur.fetchone()['a'], 1)
self.conn.cursor_factory = None
self.assertEqual(self.conn.cursor_factory, None)
cur = self.conn.cursor()
cur.execute("select 1 as a")
self.assertRaises(TypeError, (lambda r: r['a']), cur.fetchone())
def test_cursor_factory_none(self):
# issue #210
conn = self.connect()
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), ext.cursor)
conn = self.connect(cursor_factory=psycopg2.extras.DictCursor)
cur = conn.cursor(cursor_factory=None)
self.assertEqual(type(cur), psycopg2.extras.DictCursor)
def test_failed_init_status(self):
class SubConnection(ext.connection):
def __init__(self, dsn):
try:
super(SubConnection, self).__init__(dsn)
except Exception:
pass
c = SubConnection("dbname=thereisnosuchdatabasemate password=foobar")
self.assertTrue(c.closed, "connection failed so it must be closed")
self.assertTrue('foobar' not in c.dsn, "password was not obscured")
class ParseDsnTestCase(ConnectingTestCase):
def test_parse_dsn(self):
from psycopg2 import ProgrammingError
self.assertEqual(
ext.parse_dsn('dbname=test user=tester password=secret'),
dict(user='tester', password='secret', dbname='test'),
"simple DSN parsed")
self.assertRaises(ProgrammingError, ext.parse_dsn,
"dbname=test 2 user=tester password=secret")
self.assertEqual(
ext.parse_dsn("dbname='test 2' user=tester password=secret"),
dict(user='tester', password='secret', dbname='test 2'),
"DSN with quoting parsed")
# Can't really use assertRaisesRegexp() here since we need to
        # make sure that secret is *not* exposed in the error message
# (and it also requires python >= 2.7).
raised = False
try:
# unterminated quote after dbname:
ext.parse_dsn("dbname='test 2 user=tester password=secret")
except ProgrammingError as e:
raised = True
self.assertTrue(str(e).find('secret') < 0,
"DSN was not exposed in error message")
        except Exception as e:
self.fail("unexpected error condition: " + repr(e))
self.assertTrue(raised, "ProgrammingError raised due to invalid DSN")
@skip_before_libpq(9, 2)
def test_parse_dsn_uri(self):
self.assertEqual(ext.parse_dsn('postgresql://tester:secret@/test'),
dict(user='tester', password='secret', dbname='test'),
"valid URI dsn parsed")
raised = False
try:
# extra '=' after port value
ext.parse_dsn(dsn='postgresql://tester:secret@/test?port=1111=x')
except psycopg2.ProgrammingError as e:
raised = True
self.assertTrue(str(e).find('secret') < 0,
"URI was not exposed in error message")
        except Exception as e:
self.fail("unexpected error condition: " + repr(e))
self.assertTrue(raised, "ProgrammingError raised due to invalid URI")
def test_unicode_value(self):
snowman = "\u2603"
d = ext.parse_dsn('dbname=' + snowman)
if sys.version_info[0] < 3:
self.assertEqual(d['dbname'], snowman.encode('utf8'))
else:
self.assertEqual(d['dbname'], snowman)
def test_unicode_key(self):
snowman = "\u2603"
self.assertRaises(psycopg2.ProgrammingError, ext.parse_dsn,
snowman + '=' + snowman)
def test_bad_param(self):
self.assertRaises(TypeError, ext.parse_dsn, None)
self.assertRaises(TypeError, ext.parse_dsn, 42)
class MakeDsnTestCase(ConnectingTestCase):
def test_empty_arguments(self):
self.assertEqual(ext.make_dsn(), '')
def test_empty_string(self):
dsn = ext.make_dsn('')
self.assertEqual(dsn, '')
def test_params_validation(self):
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, 'dbnamo=a')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, dbnamo='a')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, 'dbname=a', nosuchparam='b')
def test_empty_param(self):
dsn = ext.make_dsn(dbname='sony', password='')
self.assertDsnEqual(dsn, "dbname=sony password=''")
def test_escape(self):
dsn = ext.make_dsn(dbname='hello world')
self.assertEqual(dsn, "dbname='hello world'")
dsn = ext.make_dsn(dbname=r'back\slash')
self.assertEqual(dsn, r"dbname=back\\slash")
dsn = ext.make_dsn(dbname="quo'te")
self.assertEqual(dsn, r"dbname=quo\'te")
dsn = ext.make_dsn(dbname="with\ttab")
self.assertEqual(dsn, "dbname='with\ttab'")
dsn = ext.make_dsn(dbname=r"\every thing'")
self.assertEqual(dsn, r"dbname='\\every thing\''")
def test_database_is_a_keyword(self):
self.assertEqual(ext.make_dsn(database='sigh'), "dbname=sigh")
def test_params_merging(self):
dsn = ext.make_dsn('dbname=foo host=bar', host='baz')
self.assertDsnEqual(dsn, 'dbname=foo host=baz')
dsn = ext.make_dsn('dbname=foo', user='postgres')
self.assertDsnEqual(dsn, 'dbname=foo user=postgres')
def test_no_dsn_munging(self):
dsnin = 'dbname=a host=b user=c password=d'
dsn = ext.make_dsn(dsnin)
self.assertEqual(dsn, dsnin)
def test_null_args(self):
dsn = ext.make_dsn("dbname=foo", user="bar", password=None)
self.assertDsnEqual(dsn, "dbname=foo user=bar")
@skip_before_libpq(9, 2)
def test_url_is_cool(self):
url = 'postgresql://tester:secret@/test?application_name=wat'
dsn = ext.make_dsn(url)
self.assertEqual(dsn, url)
dsn = ext.make_dsn(url, application_name='woot')
self.assertDsnEqual(dsn,
'dbname=test user=tester password=secret application_name=woot')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, 'postgresql://tester:secret@/test?nosuch=param')
self.assertRaises(psycopg2.ProgrammingError,
ext.make_dsn, url, nosuch="param")
@skip_before_libpq(9, 3)
def test_get_dsn_parameters(self):
conn = self.connect()
d = conn.get_dsn_parameters()
self.assertEqual(d['dbname'], dbname) # the only param we can check reliably
self.assertTrue('password' not in d, d)
class IsolationLevelsTestCase(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
conn = self.connect()
cur = conn.cursor()
try:
cur.execute("drop table isolevel;")
except psycopg2.ProgrammingError:
conn.rollback()
cur.execute("create table isolevel (id integer);")
conn.commit()
conn.close()
def test_isolation_level(self):
conn = self.connect()
self.assertEqual(
conn.isolation_level,
ext.ISOLATION_LEVEL_DEFAULT)
def test_encoding(self):
conn = self.connect()
self.assertTrue(conn.encoding in ext.encodings)
def test_set_isolation_level(self):
conn = self.connect()
curs = conn.cursor()
levels = [
('read uncommitted',
ext.ISOLATION_LEVEL_READ_UNCOMMITTED),
('read committed', ext.ISOLATION_LEVEL_READ_COMMITTED),
('repeatable read', ext.ISOLATION_LEVEL_REPEATABLE_READ),
('serializable', ext.ISOLATION_LEVEL_SERIALIZABLE),
]
for name, level in levels:
conn.set_isolation_level(level)
# the only values available on prehistoric PG versions
if conn.server_version < 80000:
if level in (
ext.ISOLATION_LEVEL_READ_UNCOMMITTED,
ext.ISOLATION_LEVEL_REPEATABLE_READ):
name, level = levels[levels.index((name, level)) + 1]
self.assertEqual(conn.isolation_level, level)
curs.execute('show transaction_isolation;')
got_name = curs.fetchone()[0]
self.assertEqual(name, got_name)
conn.commit()
self.assertRaises(ValueError, conn.set_isolation_level, -1)
self.assertRaises(ValueError, conn.set_isolation_level, 5)
def test_set_isolation_level_autocommit(self):
conn = self.connect()
curs = conn.cursor()
conn.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_DEFAULT)
self.assertTrue(conn.autocommit)
conn.isolation_level = 'serializable'
self.assertEqual(conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertTrue(conn.autocommit)
curs.execute('show transaction_isolation;')
self.assertEqual(curs.fetchone()[0], 'serializable')
def test_set_isolation_level_default(self):
conn = self.connect()
curs = conn.cursor()
conn.autocommit = True
curs.execute("set default_transaction_isolation to 'read committed'")
conn.autocommit = False
conn.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(conn.isolation_level,
ext.ISOLATION_LEVEL_SERIALIZABLE)
curs.execute("show transaction_isolation")
self.assertEqual(curs.fetchone()[0], "serializable")
conn.rollback()
conn.set_isolation_level(ext.ISOLATION_LEVEL_DEFAULT)
curs.execute("show transaction_isolation")
self.assertEqual(curs.fetchone()[0], "read committed")
def test_set_isolation_level_abort(self):
conn = self.connect()
cur = conn.cursor()
self.assertEqual(ext.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("insert into isolevel values (10);")
self.assertEqual(ext.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_INTRANS,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(0, cur.fetchone()[0])
cur.execute("insert into isolevel values (10);")
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
conn.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(psycopg2.extensions.TRANSACTION_STATUS_IDLE,
conn.get_transaction_status())
cur.execute("select count(*) from isolevel;")
self.assertEqual(1, cur.fetchone()[0])
self.assertEqual(conn.isolation_level,
psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
def test_isolation_level_autocommit(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_AUTOCOMMIT)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
def test_isolation_level_read_committed(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_READ_COMMITTED)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
def test_isolation_level_serializable(self):
cnn1 = self.connect()
cnn2 = self.connect()
cnn2.set_isolation_level(ext.ISOLATION_LEVEL_SERIALIZABLE)
cur1 = cnn1.cursor()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(0, cur1.fetchone()[0])
cnn1.commit()
cur2 = cnn2.cursor()
cur2.execute("insert into isolevel values (10);")
cur1.execute("insert into isolevel values (20);")
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cnn1.commit()
cur2.execute("select count(*) from isolevel;")
self.assertEqual(1, cur2.fetchone()[0])
cur1.execute("select count(*) from isolevel;")
self.assertEqual(1, cur1.fetchone()[0])
cnn2.commit()
cur1.execute("select count(*) from isolevel;")
self.assertEqual(2, cur1.fetchone()[0])
cur2.execute("select count(*) from isolevel;")
self.assertEqual(2, cur2.fetchone()[0])
def test_isolation_level_closed(self):
cnn = self.connect()
cnn.close()
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 0)
self.assertRaises(psycopg2.InterfaceError,
cnn.set_isolation_level, 1)
def test_setattr_isolation_level_int(self):
cur = self.conn.cursor()
self.conn.isolation_level = ext.ISOLATION_LEVEL_SERIALIZABLE
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_REPEATABLE_READ
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_REPEATABLE_READ)
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_COMMITTED
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.isolation_level = ext.ISOLATION_LEVEL_READ_UNCOMMITTED
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_UNCOMMITTED)
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.assertEqual(ext.ISOLATION_LEVEL_DEFAULT, None)
self.conn.isolation_level = ext.ISOLATION_LEVEL_DEFAULT
self.assertEqual(self.conn.isolation_level, None)
cur.execute("SHOW transaction_isolation;")
isol = cur.fetchone()[0]
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isol)
def test_setattr_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.isolation_level = "serializable"
self.assertEqual(self.conn.isolation_level, ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = "repeatable read"
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_REPEATABLE_READ)
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_SERIALIZABLE)
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.isolation_level = "read committed"
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.isolation_level = "read uncommitted"
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_UNCOMMITTED)
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(self.conn.isolation_level,
ext.ISOLATION_LEVEL_READ_COMMITTED)
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.isolation_level = "default"
self.assertEqual(self.conn.isolation_level, None)
cur.execute("SHOW transaction_isolation;")
isol = cur.fetchone()[0]
cur.execute("SHOW default_transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isol)
def test_setattr_isolation_level_invalid(self):
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 0)
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', -1)
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 5)
self.assertRaises(ValueError, setattr, self.conn, 'isolation_level', 'bah')
class ConnectionTwoPhaseTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
self.make_test_table()
self.clear_test_xacts()
def tearDown(self):
self.clear_test_xacts()
ConnectingTestCase.tearDown(self)
def clear_test_xacts(self):
"""Rollback all the prepared transaction in the testing db."""
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
try:
cur.execute(
"select gid from pg_prepared_xacts where database = %s",
(dbname,))
except psycopg2.ProgrammingError:
cnn.rollback()
cnn.close()
return
gids = [r[0] for r in cur]
for gid in gids:
cur.execute("rollback prepared %s;", (gid,))
cnn.close()
def make_test_table(self):
cnn = self.connect()
cur = cnn.cursor()
try:
cur.execute("DROP TABLE test_tpc;")
except psycopg2.ProgrammingError:
cnn.rollback()
cur.execute("CREATE TABLE test_tpc (data text);")
cnn.commit()
cnn.close()
def count_xacts(self):
"""Return the number of prepared xacts currently in the test db."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("""
select count(*) from pg_prepared_xacts
where database = %s;""",
(dbname,))
rv = cur.fetchone()[0]
cnn.close()
return rv
def count_test_records(self):
"""Return the number of records in the test table."""
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select count(*) from test_tpc;")
rv = cur.fetchone()[0]
cnn.close()
return rv
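    # The tests below exercise the DBAPI two-phase-commit flow:
    #   xid = cnn.xid(format_id, gtrid, bqual)
    #   cnn.tpc_begin(xid); ... do work ...; cnn.tpc_prepare()
    #   then cnn.tpc_commit() or cnn.tpc_rollback(), optionally passing a
    #   recovered xid from cnn.tpc_recover() on a fresh connection.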
def test_tpc_commit(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, ext.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_commit()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_commit_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_commit(xid)
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(1, self.count_test_records())
def test_tpc_rollback(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
self.assertEqual(cnn.status, ext.STATUS_PREPARED)
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_one_phase(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_rollback_1p');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_rollback()
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_tpc_rollback_recovered(self):
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
self.assertEqual(cnn.status, ext.STATUS_READY)
cnn.tpc_begin(xid)
self.assertEqual(cnn.status, ext.STATUS_BEGIN)
cur = cnn.cursor()
cur.execute("insert into test_tpc values ('test_tpc_commit_rec');")
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn.tpc_prepare()
cnn.close()
self.assertEqual(1, self.count_xacts())
self.assertEqual(0, self.count_test_records())
cnn = self.connect()
xid = cnn.xid(1, "gtrid", "bqual")
cnn.tpc_rollback(xid)
self.assertEqual(cnn.status, ext.STATUS_READY)
self.assertEqual(0, self.count_xacts())
self.assertEqual(0, self.count_test_records())
def test_status_after_recover(self):
cnn = self.connect()
self.assertEqual(ext.STATUS_READY, cnn.status)
cnn.tpc_recover()
self.assertEqual(ext.STATUS_READY, cnn.status)
cur = cnn.cursor()
cur.execute("select 1")
self.assertEqual(ext.STATUS_BEGIN, cnn.status)
cnn.tpc_recover()
self.assertEqual(ext.STATUS_BEGIN, cnn.status)
def test_recovered_xids(self):
        # insert a few test transactions
cnn = self.connect()
cnn.set_isolation_level(0)
cur = cnn.cursor()
cur.execute("begin; prepare transaction '1-foo';")
cur.execute("begin; prepare transaction '2-bar';")
# read the values to return
cur.execute("""
select gid, prepared, owner, database
from pg_prepared_xacts
where database = %s;""",
(dbname,))
okvals = cur.fetchall()
okvals.sort()
cnn = self.connect()
xids = cnn.tpc_recover()
xids = [xid for xid in xids if xid.database == dbname]
xids.sort(key=attrgetter('gtrid'))
# check the values returned
self.assertEqual(len(okvals), len(xids))
for (xid, (gid, prepared, owner, database)) in zip(xids, okvals):
self.assertEqual(xid.gtrid, gid)
self.assertEqual(xid.prepared, prepared)
self.assertEqual(xid.owner, owner)
self.assertEqual(xid.database, database)
def test_xid_encoding(self):
cnn = self.connect()
xid = cnn.xid(42, "gtrid", "bqual")
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn = self.connect()
cur = cnn.cursor()
cur.execute("select gid from pg_prepared_xacts where database = %s;",
(dbname,))
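        # The stored gid encodes the xid as
        # "<format_id>_<base64(gtrid)>_<base64(bqual)>":
        # base64("gtrid") == "Z3RyaWQ=", base64("bqual") == "YnF1YWw=".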
self.assertEqual('42_Z3RyaWQ=_YnF1YWw=', cur.fetchone()[0])
@slow
def test_xid_roundtrip(self):
for fid, gtrid, bqual in [
(0, "", ""),
(42, "gtrid", "bqual"),
(0x7fffffff, "x" * 64, "y" * 64),
]:
cnn = self.connect()
xid = cnn.xid(fid, gtrid, bqual)
cnn.tpc_begin(xid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [x for x in cnn.tpc_recover() if x.database == dbname]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, fid)
self.assertEqual(xid.gtrid, gtrid)
self.assertEqual(xid.bqual, bqual)
cnn.tpc_rollback(xid)
@slow
def test_unparsed_roundtrip(self):
for tid in [
'',
'hello, world!',
            'x' * 199,  # PostgreSQL's limit on transaction id length
]:
cnn = self.connect()
cnn.tpc_begin(tid)
cnn.tpc_prepare()
cnn.close()
cnn = self.connect()
xids = [x for x in cnn.tpc_recover() if x.database == dbname]
self.assertEqual(1, len(xids))
xid = xids[0]
self.assertEqual(xid.format_id, None)
self.assertEqual(xid.gtrid, tid)
self.assertEqual(xid.bqual, None)
cnn.tpc_rollback(xid)
def test_xid_construction(self):
from psycopg2.extensions import Xid
x1 = Xid(74, 'foo', 'bar')
self.assertEqual(74, x1.format_id)
self.assertEqual('foo', x1.gtrid)
self.assertEqual('bar', x1.bqual)
def test_xid_from_string(self):
from psycopg2.extensions import Xid
x2 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(42, x2.format_id)
self.assertEqual('gtrid', x2.gtrid)
self.assertEqual('bqual', x2.bqual)
x3 = Xid.from_string('99_xxx_yyy')
self.assertEqual(None, x3.format_id)
self.assertEqual('99_xxx_yyy', x3.gtrid)
self.assertEqual(None, x3.bqual)
def test_xid_to_string(self):
from psycopg2.extensions import Xid
x1 = Xid.from_string('42_Z3RyaWQ=_YnF1YWw=')
self.assertEqual(str(x1), '42_Z3RyaWQ=_YnF1YWw=')
x2 = Xid.from_string('99_xxx_yyy')
self.assertEqual(str(x2), '99_xxx_yyy')
def test_xid_unicode(self):
cnn = self.connect()
x1 = cnn.xid(10, 'uni', 'code')
cnn.tpc_begin(x1)
cnn.tpc_prepare()
cnn.reset()
xid = [x for x in cnn.tpc_recover() if x.database == dbname][0]
self.assertEqual(10, xid.format_id)
self.assertEqual('uni', xid.gtrid)
self.assertEqual('code', xid.bqual)
def test_xid_unicode_unparsed(self):
# We don't expect people shooting snowmen as transaction ids,
        # so if something explodes with an encode error I don't mind.
        # Let's just check unicode is accepted as the type.
cnn = self.connect()
cnn.set_client_encoding('utf8')
cnn.tpc_begin("transaction-id")
cnn.tpc_prepare()
cnn.reset()
xid = [x for x in cnn.tpc_recover() if x.database == dbname][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('transaction-id', xid.gtrid)
self.assertEqual(None, xid.bqual)
def test_cancel_fails_prepared(self):
cnn = self.connect()
cnn.tpc_begin('cancel')
cnn.tpc_prepare()
self.assertRaises(psycopg2.ProgrammingError, cnn.cancel)
def test_tpc_recover_non_dbapi_connection(self):
from psycopg2.extras import RealDictConnection
cnn = self.connect(connection_factory=RealDictConnection)
cnn.tpc_begin('dict-connection')
cnn.tpc_prepare()
cnn.reset()
xids = cnn.tpc_recover()
xid = [x for x in xids if x.database == dbname][0]
self.assertEqual(None, xid.format_id)
self.assertEqual('dict-connection', xid.gtrid)
self.assertEqual(None, xid.bqual)
decorate_all_tests(ConnectionTwoPhaseTests, skip_if_tpc_disabled)
class TransactionControlTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
self.conn.set_session,
ext.ISOLATION_LEVEL_SERIALIZABLE)
def test_not_in_transaction(self):
cur = self.conn.cursor()
cur.execute("select 1")
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session,
ext.ISOLATION_LEVEL_SERIALIZABLE)
def test_set_isolation_level(self):
cur = self.conn.cursor()
self.conn.set_session(
ext.ISOLATION_LEVEL_SERIALIZABLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
ext.ISOLATION_LEVEL_REPEATABLE_READ)
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session(
isolation_level=ext.ISOLATION_LEVEL_READ_COMMITTED)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session(
isolation_level=ext.ISOLATION_LEVEL_READ_UNCOMMITTED)
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_set_isolation_level_str(self):
cur = self.conn.cursor()
self.conn.set_session("serializable")
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("repeatable read")
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'repeatable read')
else:
self.assertEqual(cur.fetchone()[0], 'serializable')
self.conn.rollback()
self.conn.set_session("read committed")
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
self.conn.set_session("read uncommitted")
cur.execute("SHOW transaction_isolation;")
if self.conn.server_version > 80000:
self.assertEqual(cur.fetchone()[0], 'read uncommitted')
else:
self.assertEqual(cur.fetchone()[0], 'read committed')
self.conn.rollback()
def test_bad_isolation_level(self):
self.assertRaises(ValueError, self.conn.set_session, 0)
self.assertRaises(ValueError, self.conn.set_session, 5)
self.assertRaises(ValueError, self.conn.set_session, 'whatever')
def test_set_read_only(self):
self.assertTrue(self.conn.readonly is None)
cur = self.conn.cursor()
self.conn.set_session(readonly=True)
self.assertTrue(self.conn.readonly is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(readonly=False)
self.assertTrue(self.conn.readonly is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_setattr_read_only(self):
cur = self.conn.cursor()
self.conn.readonly = True
self.assertTrue(self.conn.readonly is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.assertRaises(self.conn.ProgrammingError,
setattr, self.conn, 'readonly', False)
self.assertTrue(self.conn.readonly is True)
self.conn.rollback()
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur = self.conn.cursor()
self.conn.readonly = None
self.assertTrue(self.conn.readonly is None)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off') # assume the server default is 'off'
self.conn.rollback()
self.conn.readonly = False
self.assertTrue(self.conn.readonly is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_set_default(self):
cur = self.conn.cursor()
cur.execute("SHOW transaction_isolation;")
isolevel = cur.fetchone()[0]
cur.execute("SHOW transaction_read_only;")
readonly = cur.fetchone()[0]
self.conn.rollback()
self.conn.set_session(isolation_level='serializable', readonly=True)
self.conn.set_session(isolation_level='default', readonly='default')
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], isolevel)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], readonly)
@skip_before_postgres(9, 1)
def test_set_deferrable(self):
self.assertTrue(self.conn.deferrable is None)
cur = self.conn.cursor()
self.conn.set_session(readonly=True, deferrable=True)
self.assertTrue(self.conn.deferrable is True)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
self.conn.set_session(deferrable=False)
self.assertTrue(self.conn.deferrable is False)
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
@skip_after_postgres(9, 1)
def test_set_deferrable_error(self):
self.assertRaises(psycopg2.ProgrammingError,
self.conn.set_session, readonly=True, deferrable=True)
self.assertRaises(psycopg2.ProgrammingError,
setattr, self.conn, 'deferrable', True)
@skip_before_postgres(9, 1)
def test_setattr_deferrable(self):
cur = self.conn.cursor()
self.conn.deferrable = True
self.assertTrue(self.conn.deferrable is True)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.assertRaises(self.conn.ProgrammingError,
setattr, self.conn, 'deferrable', False)
self.assertTrue(self.conn.deferrable is True)
self.conn.rollback()
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.rollback()
cur = self.conn.cursor()
self.conn.deferrable = None
self.assertTrue(self.conn.deferrable is None)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off') # assume the server default is 'off'
self.conn.rollback()
self.conn.deferrable = False
self.assertTrue(self.conn.deferrable is False)
cur.execute("SHOW transaction_deferrable;")
self.assertEqual(cur.fetchone()[0], 'off')
self.conn.rollback()
def test_mixing_session_attribs(self):
cur = self.conn.cursor()
self.conn.autocommit = True
self.conn.readonly = True
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
self.conn.autocommit = False
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
cur.execute("SHOW default_transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'off')
class AutocommitTests(ConnectingTestCase):
def test_closed(self):
self.conn.close()
self.assertRaises(psycopg2.InterfaceError,
setattr, self.conn, 'autocommit', True)
# The getter doesn't have a guard. We may change this in future
# to make it consistent with other methods; meanwhile let's just check
# it doesn't explode.
try:
self.assertTrue(self.conn.autocommit in (True, False))
except psycopg2.InterfaceError:
pass
def test_default_no_autocommit(self):
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
def test_set_autocommit(self):
self.conn.autocommit = True
self.assertTrue(self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
self.conn.autocommit = False
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_INTRANS)
def test_set_intrans_error(self):
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertRaises(psycopg2.ProgrammingError,
setattr, self.conn, 'autocommit', True)
def test_set_session_autocommit(self):
self.conn.set_session(autocommit=True)
self.assertTrue(self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur = self.conn.cursor()
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
self.conn.set_session(autocommit=False)
self.assertTrue(not self.conn.autocommit)
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_BEGIN)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_INTRANS)
self.conn.rollback()
self.conn.set_session('serializable', readonly=True, autocommit=True)
self.assertTrue(self.conn.autocommit)
cur.execute('select 1;')
self.assertEqual(self.conn.status, ext.STATUS_READY)
self.assertEqual(self.conn.get_transaction_status(),
ext.TRANSACTION_STATUS_IDLE)
cur.execute("SHOW transaction_isolation;")
self.assertEqual(cur.fetchone()[0], 'serializable')
cur.execute("SHOW transaction_read_only;")
self.assertEqual(cur.fetchone()[0], 'on')
class PasswordLeakTestCase(ConnectingTestCase):
def setUp(self):
super(PasswordLeakTestCase, self).setUp()
PasswordLeakTestCase.dsn = None
class GrassingConnection(ext.connection):
"""A connection snitching the dsn away.
This connection passes the dsn to the test case class even if init
fails (e.g. connection error). Test that the dsn is mangled correctly anyway.
"""
def __init__(self, *args, **kwargs):
try:
super(PasswordLeakTestCase.GrassingConnection, self).__init__(
*args, **kwargs)
finally:
# The connection is not entirely initialized; however, the C
# code should have set the dsn, and it should have scrubbed
# the password away
PasswordLeakTestCase.dsn = self.dsn
def test_leak(self):
self.assertRaises(psycopg2.DatabaseError,
self.GrassingConnection, "dbname=nosuch password=whateva")
self.assertDsnEqual(self.dsn, "dbname=nosuch password=xxx")
@skip_before_libpq(9, 2)
def test_url_leak(self):
self.assertRaises(psycopg2.DatabaseError,
self.GrassingConnection,
"postgres://someone:whateva@localhost/nosuch")
self.assertDsnEqual(self.dsn,
"user=someone password=xxx host=localhost dbname=nosuch")
class SignalTestCase(ConnectingTestCase):
@slow
@skip_before_postgres(8, 2)
def test_bug_551_returning(self):
# Raise an exception trying to decode 'id'
self._test_bug_551(query="""
INSERT INTO test551 (num) VALUES (%s) RETURNING id
""")
@slow
def test_bug_551_no_returning(self):
# Raise an exception trying to decode 'INSERT 0 1'
self._test_bug_551(query="""
INSERT INTO test551 (num) VALUES (%s)
""")
def _test_bug_551(self, query):
script = ("""\
import os
import sys
import time
import signal
import threading
import psycopg2
def handle_sigabort(sig, frame):
sys.exit(1)
def killer():
time.sleep(0.5)
os.kill(os.getpid(), signal.SIGABRT)
signal.signal(signal.SIGABRT, handle_sigabort)
conn = psycopg2.connect(%(dsn)r)
cur = conn.cursor()
cur.execute("create table test551 (id serial, num varchar(50))")
t = threading.Thread(target=killer)
t.daemon = True
t.start()
while True:
cur.execute(%(query)r, ("Hello, world!",))
""" % {'dsn': dsn, 'query': query})
proc = sp.Popen([sys.executable, '-c', script_to_py3(script)],
stdout=sp.PIPE, stderr=sp.PIPE)
(out, err) = proc.communicate()
self.assertNotEqual(proc.returncode, 0)
# Strip [NNN refs] from output
err = re.sub(br'\[[^\]]+\]', b'', err).strip()
self.assertTrue(not err, err)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
mit
|
xtiankisutsa/MARA_Framework
|
tools/androguard/elsim/tests/test_similarity.py
|
12
|
16026
|
#!/usr/bin/env python
# This file is part of Elsim.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import sys, itertools, time, os, random
PATH_INSTALL = "./"
sys.path.append(PATH_INSTALL)
from elsim.similarity.similarity import *
TESTS_RANDOM_SIGN = [ "B[F1]",
"B[G]",
"B[I]B[RF1]B[F0S]B[IF1]B[]B[]B[S]B[SS]B[RF0]B[]B[SP0I]B[GP1]",
"B[R]B[F1]",
"B[]B[]B[IR]",
"B[G]B[SGIGF0]B[RP1G]B[SP1I]B[SG]B[SSGP0]B[F1]B[P0SSGR]B[F1]B[SSSI]B[RF1P0R]B[GSP0RP0P0]B[GI]B[P1]B[I]B[GP1S]",
"B[P0SP1G]B[S]B[SGP0R]B[RI]B[GRS]B[P0]B[GRI]B[I]B[RP0I]B[SGRF0P0]B[I]B[]B[GGSP0]B[P1RSS]B[]B[S]B[IF1GP0]B[IP0P0GP0P1]B[P0RRRF0]B[R]B[R]B[RRF1S]B[F0P1R]",
"B[SP0IP0F0P1]B[GS]B[F1]B[RP0]B[IF0P1S]B[P1]",
"B[P0GSGP1]B[R]B[RP1P0]B[F1SIIGF1]B[G]B[F0SP1IF0I]B[RF1F0SIP1SG]B[P1GF1]B[P1G]B[F1P1GIIIGF1]B[F0F1P1RG]B[F1SF1]B[F1SRSS]B[GP0]B[SP1]B[IIF1]B[GIRGR]B[IP1]B[GG]B[RIP1RF1GS]B[SS]B[SSIP0GSP1]B[GGIGSP1G]B[P1GIGSGGI]B[P0P1IGRSRR]B[P1P0GP1]B[P1F1GGGS]B[RR]B[SIF1]B[SR]B[RSI]B[IIRGF1]",
]
TESTS_CLOSED_SIGN = [
[ "B[I]B[RF1]B[F0S]B[IF1]B[]B[]B[S]B[SS]B[RF0]B[]B[SP0I]B[GP1]", "B[I]B[RF1]B[F0S]B[IF1]B[]B[S]B[SS]B[RF0]B[]B[SP0I]B[GP1]" ],
[ "B[I]B[RF1]B[F0S]B[IF1]B[]B[]B[S]B[SS]B[RF0]B[]B[SP0I]B[GP1]", "B[I]B[RF1]B[F0S]B[IF1]B[]B[]B[S]B[SS]B[RF0]B[]B[SP0I]B[GP0]" ],
]
TESTS_DIFFERENT_SIGN = [
[ "B[F0P1P1P1P0F0P1P1P1P1P1P0F0P1F0P1P1P0P1P1P1P1R]", "B[F0P1F0P1P1]B[SP1P1F0F0F0I]B[F0F0P1G]" ],
]
TESTS_SMALL_SIGN = [
[ "TOTO TOTO", "TOTO TOTO" ],
[ "TITO TOTO", "TOTO TOTO" ],
[ "TOTO TATO", "TOTO TOTO" ],
[ "B[]B[]B[IR]", "B[]B[]B[IR]"],
[ "B[]B[]B[IR]", "B[]B[]B[IR]B"],
[ "HELLO WORLD", "TOTO TOTO" ],
]
CONVERT_RESULT_TEST = { " OK " : 1,
" X " : 0,
}
DEBUG = 0
def test(got, expected, fcmp):
if fcmp(got, expected):
prefix = ' OK '
else:
prefix = ' X '
if DEBUG:
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
return CONVERT_RESULT_TEST[ prefix ]
# C(xx) = C(x)
def test_Idempotency(n, x):
s1 = n.compress(x + x)
s2 = n.compress(x)
return test( s1, s2, lambda x, y : x == y), s1, s2
# C(x) <= C(xy)
def test_Monotonicity(n, x, y):
s1 = n.compress( x )
s2 = n.compress( x + y )
return test( s1, s2, lambda x, y : x <= y ), s1, s2
# C(xy) = C(yx)
def test_Symetry(n, x, y):
s1 = n.compress( x + y )
s2 = n.compress( y + x )
return test( s1, s2, lambda x, y : x == y), s1, s2
# C(xy) + C(z) <= C(xz) + C(yz)
def test_Distributivity(n, x, y, z):
s1 = n.compress( x + y ) + n.compress( z )
s2 = n.compress( x + z ) + n.compress( y + z )
return test( s1, s2, lambda x, y : x <= y ), s1, s2
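# The four checks above (idempotency, monotonicity, symmetry, distributivity)
# are the "normal compressor" axioms from Cilibrasi & Vitanyi's work on
# clustering by compression: a compressor C satisfying them makes the
# Normalized Compression Distance
#     NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y))
# behave as a (quasi-)metric, which is what the NCD/NCS tests below rely on.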
def print_timing(func):
def wrapper(*arg):
t1 = time.time()
res = func(*arg)
t2 = time.time()
print '-> %0.8f s' % ((t2-t1))
return res
return wrapper
@print_timing
def Idempotency( n, TESTS_TEXT ):
print "Idempotency ",
j = 0
res = 0
cc = 0
for i in itertools.permutations( TESTS_TEXT, 1 ):
r, c1, c2 = test_Idempotency( n, i[0] )
cc += c1
cc += c2
res += r
j += 1
print res, "/", j, cc,
@print_timing
def Monotonicity( n, TESTS_TEXT ):
print "Monotonicity ",
j = 0
res = 0
cc = 0
for i in itertools.permutations( TESTS_TEXT, 2 ):
r, c1, c2 = test_Monotonicity( n, i[0], i[1] )
cc += c1
cc += c2
res += r
j += 1
print res, "/", j, cc,
@print_timing
def Symetry( n, TESTS_TEXT ):
print "Symetry ",
j = 0
res = 0
cc = 0
for i in itertools.permutations( TESTS_TEXT, 2 ):
r, c1, c2 = test_Symetry( n, i[0], i[1] )
cc += c1
cc += c2
res += r
j += 1
print res, "/", j, cc,
@print_timing
def Distributivity( n, TESTS_TEXT ):
print "Distributivity ",
j = 0
cc = 0
res = 0
for i in itertools.permutations( TESTS_TEXT, 3 ):
r, c1, c2 = test_Distributivity( n, i[0], i[1], i[2] )
cc += c1
cc += c2
res += r
j += 1
print res, "/", j, cc,
def TestNCDPermutations(n, ref, threshold):
tres, nb, idx, t = benchmark(n.ncd, ref, threshold, lambda x, y : x <= y)
print "NCD Permutation %f threshold=%f time=%fs for %d/%d" % ( tres, threshold, t, nb, idx )
def TestNCSPermutations(n, ref, threshold):
tres, nb, idx, t = benchmark(n.ncs, ref, threshold, lambda x, y : x >= y)
print "NCS Permutation %f threshold=%f time=%fs for %d/%d" % ( tres, threshold, t, nb, idx )
def TestCMIDPermutations(n, ref, threshold):
tres, nb, idx, t = benchmark(n.cmid, ref, threshold, lambda x, y : x >= y)
print "CMID Permutation %f threshold=%f time=%fs for %d/%d" % ( tres, threshold, t, nb, idx )
def TestNCD( n, tests, type_test ):
TestSim("NCD", tests, type_test, n.ncd)
def TestNCS( n, tests, type_test ):
TestSim("NCS", tests, type_test, n.ncs)
def TestCMID( n, tests, type_test ):
TestSim("CMID", tests, type_test, n.cmid)
def TestCMID2( n ):
x = "HI WORLD"
y = "B[I]B[RF1]B[F0S]B[IF1]B[]B[]B[S]B[SS]B[RF0]B[]B[SP0I]B[GP1]B[SP0IP0F0P1]B[GS]B[F1]B[RP0]B[IF0P1S]B[P1]"
print n.cmid( x, y )
def TestSim(type_sim, tests, type_test, func):
print type_sim, type_test
nb = 0
print "\t",
t1 = time.clock()
for i in tests:
val, _ = func( i[0], i[1] )
print "%d:%f" % (nb, val),
nb += 1
t2 = time.clock()
print "%fs" % (t2 - t1)
def benchmark(func, ref, threshold, fcmp):
nb = 0
idx = 0
tres = 0.0
t1 = time.clock()
for i in itertools.permutations(ref):
perm = ''.join(j for j in i)
res = func(ref, perm)
tres += res
if fcmp(res, threshold):
nb += 1
idx += 1
t2 = time.clock()
return tres/idx, nb, idx, t2 - t1
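# Note: itertools.permutations(ref) enumerates all len(ref)! orderings of the
# reference string, so benchmark() is only tractable for short refs; e.g. the
# (commented-out) TestNCDPermutations(n, "Android", 0.2) call in __main__
# implies 7! = 5040 similarity computations.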
import math
def entropy(data):
entropy = 0
if len(data) == 0:
return entropy
for x in range(256):
p_x = float(data.count(chr(x)))/len(data)
if p_x > 0:
entropy += - p_x*math.log(p_x, 2)
return entropy
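# Illustrative sanity checks for entropy(): a run of one repeated byte has
# zero entropy, and k equiprobable distinct bytes give log2(k) bits per byte:
#   entropy("aaaa") == 0.0
#   entropy("ab" * 50) == 1.0
#   entropy("abcd" * 25) == 2.0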
def TestEntropy(n, tests, diff):
nb = 0
t1 = time.clock()
for i in tests:
print n.entropy(i[0])[0], entropy(i[0])
print n.entropy(i[1])[0], entropy(i[1])
nb += test( n.entropy(i[0])[0], n.entropy(i[1])[0], lambda x, y : (max(x,y) - min(x,y)) <= diff )
t2 = time.clock()
print "* Entropy %fs %d/%d" % (t2 - t1, nb, len(tests))
def TestProperties(n, data):
# Properties
Idempotency( n, data )
Monotonicity( n, data )
Symetry( n, data )
Distributivity( n, data )
def TestSmallString(n, data):
for i in data:
print i, n.ncd( i[0], i[1] )
def RandomData():
l = []
for i in range(0,9):
l.append( os.urandom( random.randint(0, 100) ) )
return l
def _TestRDTSC(n, m):
i = 0
t0 = n.RDTSC()
while i < m:
i += 1
t1 = n.RDTSC()
return t1 - t0
def TestRDTSC(n):
print _TestRDTSC(n, 1)
print _TestRDTSC(n, 10)
print _TestRDTSC(n, 100)
print _TestRDTSC(n, 1000)
def TestBenett(n):
X = "B[P0{Ljava/util/Formatter;}P1{Ljava/util/Formatter;<init>()V}P2P2P0{Ljava/lang/StringBuilder;}P1{Ljava/lang/String;valueOf(Ljava/lang/Object;)Ljava/lang/String;}P1{Ljava/lang/StringBuilder;<init>(Ljava/lang/String;)V}P1{Ljava/lang/StringBuilder;append(Ljava/lang/String;)Ljava/lang/StringBuilder;}P1{Ljava/lang/StringBuilder;append(I)Ljava/lang/StringBuilder;}P1{Ljava/lang/StringBuilder;toString()Ljava/lang/String;}P1{Ljava/util/Formatter;format(Ljava/lang/String; [Ljava/lang/Object;)Ljava/util/Formatter;}P1{Ljava/util/Formatter;toString()Ljava/lang/String;}P1{Ljava/lang/String;getBytes()[B}P2P0{Ljava/net/URL;}P1{Ljava/net/URL;<init>(Ljava/lang/String;)V}P1{Ljava/net/URL;openConnection()Ljava/net/URLConnection;}P1{Ljava/net/HttpURLConnection;setDoOutput(Z)V}P1{Ljava/net/HttpURLConnection;setDoInput(Z)V}P1{Ljava/net/HttpURLConnection;setRequestMethod(Ljava/lang/String;)V}P1{Ljava/net/HttpURLConnection;getOutputStream()Ljava/io/OutputStream;}P0{Ljava/io/ByteArrayInputStream;}P1{Ljava/io/ByteArrayInputStream;<init>([B)V}P1{Ljava/io/ByteArrayInputStream;read([B II)I}I]B[P1{Ljava/io/ByteArrayInputStream;close()V}P1{Ljava/io/OutputStream;close()V}P0{Ljava/io/ByteArrayOutputStream;}P1{Ljava/io/ByteArrayOutputStream;<init>()V}P0{Ljava/io/BufferedInputStream;}P1{Ljava/net/HttpURLConnection;getInputStream()Ljava/io/InputStream;}P1{Ljava/io/BufferedInputStream;<init>(Ljava/io/InputStream;)V}P1{Ljava/io/InputStream;read([BII)I}I]B[P1{Ljava/io/InputStream;close()V}P1{Ljava/io/ByteArrayOutputStream;size()I}I]B[P1{Landroid/content/Context;getSharedPreferences(Ljava/lang/String;I)Landroid/content/SharedPreferences;}P1{Landroid/content/SharedPreferences;edit()Landroid/content/SharedPreferences$Editor;}P1{Landroid/content/SharedPreferences$Editor;putInt(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor;}P1{Landroid/content/SharedPreferences$Editor;commit()Z}]B[R]B[P1{Ljava/io/OutputStream;write([BII)V}P1{Ljava/io/OutputStream;flush()V}G]B[P1{Ljava/io/ByteArrayOutputStream;write([B I I)V}G]"
Y = "B[P2P2I]B[P2R]B[P0{Landroid/content/Intent;}P1{Landroid/content/Intent;<init>(Ljava/lang/String;)V}P1{Landroid/app/PendingIntent;getBroadcast(Landroid/content/Context; I Landroid/content/Intent; I)Landroid/app/PendingIntent;}P1{Landroid/telephony/SmsManager;getDefault()Landroid/telephony/SmsManager;}I]B[P1{Ljava/util/List;clear()V}]B[P1{Landroid/telephony/SmsManager;divideMessage(Ljava/lang/String;)Ljava/util/ArrayList;}P1{Ljava/util/List;iterator()Ljava/util/Iterator;}P1{Ljava/util/Iterator;hasNext()Z}I]B[P1{Ljava/util/Iterator;next()Ljava/lang/Object;}]B[P1{Landroid/telephony/SmsManager;sendTextMessage(Ljava/lang/String; Ljava/lang/String; Ljava/lang/String; Landroid/app/PendingIntent; Landroid/app/PendingIntent;)V}G]B[P0{Ljava/lang/StringBuilder;}P1{Ljava/lang/StringBuilder;<init>(Ljava/lang/String;)V}P1{Ljava/lang/StringBuilder;append(Ljava/lang/Object;)Ljava/lang/StringBuilder;}P1{Ljava/lang/StringBuilder;toString()Ljava/lang/String;}P1{Landroid/util/Log;e(Ljava/lang/String; Ljava/lang/String;)I}P1{Landroid/widget/Toast;makeText(Landroid/content/Context; Ljava/lang/CharSequence; I)Landroid/widget/Toast;}P1{Landroid/widget/Toast;show()V}G] B[P0P1P2I]B[P1{Ljava/lang/String;length()I}I]B[R]B[P1{Landroid/content/Context;getSystemService(Ljava/lang/String;)Ljava/lang/Object;}P1{Landroid/telephony/TelephonyManager;getDeviceId()Ljava/lang/String;}P0P1P1P1P1P1P1]B[P0P1P1P1I]B[G]B[P1{Ljava/io/UnsupportedEncodingException;printStackTrace()V}G]B[P2G]B[]B[P1G]B[G]B[P1{Ljava/io/IOException;printStackTrace()V}G"
n.bennett(X)
n.bennett(Y)
n.bennett( "0" * 2000 )
#n.bennett( "B[F0P1P1P1P0F0P1P1P1P1P1P0F0P1F0P1P1P0P1P1P1P1R]", "B[F0P1F0P1P1]B[SP1P1F0F0F0I]B[F0F0P1G]" )
#n.bennett( "HELLO MY NAME IS ELSIM", "HELLO MY NAME IS ELSIM" )
#n.bennett( "HELLO MY NAME IS ELSIM", "HELLO MY NAME IS EL" )
#n.bennett( "HELLO MY NAME IS ELSIM", "WOOOOOOT" )
#n.bennett( "ELSIM ELSIM", "ANDROGUARD ANDROGUARD" )
def TestReorg( n ):
X = [ "B[P0{Ljava/util/Formatter;}P1{Ljava/util/Formatter;<init>()V}P2P2P0{Ljava/lang/StringBuilder;}P1{Ljava/lang/String;valueOf(Ljava/lang/Object;)Ljava/lang/String;}P1{Ljava/lang/StringBuilder;<init>(Ljava/lang/String;)V}P1{Ljava/lang/StringBuilder;append(Ljava/lang/String;)Ljava/lang/StringBuilder;}P1{Ljava/lang/StringBuilder;append(I)Ljava/lang/StringBuilder;}P1{Ljava/lang/StringBuilder;toString()Ljava/lang/String;}P1{Ljava/util/Formatter;format(Ljava/lang/String; [Ljava/lang/Object;)Ljava/util/Formatter;}P1{Ljava/util/Formatter;toString()Ljava/lang/String;}P1{Ljava/lang/String;getBytes()[B}P2P0{Ljava/net/URL;}P1{Ljava/net/URL;<init>(Ljava/lang/String;)V}P1{Ljava/net/URL;openConnection()Ljava/net/URLConnection;}P1{Ljava/net/HttpURLConnection;setDoOutput(Z)V}P1{Ljava/net/HttpURLConnection;setDoInput(Z)V}P1{Ljava/net/HttpURLConnection;setRequestMethod(Ljava/lang/String;)V}P1{Ljava/net/HttpURLConnection;getOutputStream()Ljava/io/OutputStream;}P0{Ljava/io/ByteArrayInputStream;}P1{Ljava/io/ByteArrayInputStream;<init>([B)V}P1{Ljava/io/ByteArrayInputStream;read([B II)I}I]",
"B[P1{Ljava/io/ByteArrayInputStream;close()V}P1{Ljava/io/OutputStream;close()V}P0{Ljava/io/ByteArrayOutputStream;}P1{Ljava/io/ByteArrayOutputStream;<init>()V}P0{Ljava/io/BufferedInputStream;}P1{Ljava/net/HttpURLConnection;getInputStream()Ljava/io/InputStream;}P1{Ljava/io/BufferedInputStream;<init>(Ljava/io/InputStream;)V}P1{Ljava/io/InputStream;read([BII)I}I]B[P1{Ljava/io/InputStream;close()V}P1{Ljava/io/ByteArrayOutputStream;size()I}I]B[P1{Landroid/content/Context;getSharedPreferences(Ljava/lang/String;I)Landroid/content/SharedPreferences;}P1{Landroid/content/SharedPreferences;edit()Landroid/content/SharedPreferences$Editor;}P1{Landroid/content/SharedPreferences$Editor;putInt(Ljava/lang/String;I)Landroid/content/SharedPreferences$Editor;}P1{Landroid/content/SharedPreferences$Editor;commit()Z}]",
"B[R]",
"B[P1{Ljava/io/OutputStream;write([BII)V}P1{Ljava/io/OutputStream;flush()V}G]B[P1{Ljava/io/ByteArrayOutputStream;write([B I I)V}G]" ]
print n.ncd("".join(j for j in X), "".join(j for j in X))
for i in itertools.permutations( X, len(X) ):
print n.ncd("".join(j for j in X), "".join(j for j in i))
TESTS = { "ZLIB" : ZLIB_COMPRESS,
"BZ2" : BZ2_COMPRESS,
"LZMA" : LZMA_COMPRESS,
"XZ" : XZ_COMPRESS,
"SNAPPY" : SNAPPY_COMPRESS,
"VCBLOCKSORT" : VCBLOCKSORT_COMPRESS,
# "SMAZ" : SMAZ_COMPRESS,
}
if __name__ == "__main__":
n = SIMILARITY( "elsim/similarity/libsimilarity/libsimilarity.so" )
#TestRDTSC( n )
#n.set_compress_type( BZ2_COMPRESS )
#n.set_compress_type( SNAPPY_COMPRESS )
#TestBenett( n )
TestEntropy( n, TESTS_CLOSED_SIGN, 0.04 )
TestEntropy( n, TESTS_DIFFERENT_SIGN, 0.8 )
for i in TESTS:
n.set_compress_type( TESTS[i] )
print "* ", i
TestReorg( n )
#TestProperties( n, TESTS_RANDOM_SIGN )
#TestSmallString( n, TESTS_SMALL_SIGN )
# TestProperties( n, RandomData() )
# Closed signature
#TestNCD( n, TESTS_CLOSED_SIGN, "closed" )
#TestNCS( n, TESTS_CLOSED_SIGN, "closed" )
#TestCMID( n, TESTS_CLOSED_SIGN, "closed" )
# Different signature
#TestNCD( n, TESTS_DIFFERENT_SIGN, "different" )
# Permutations
#TestNCDPermutations( n, "Android", 0.2 )
#n.clear_caches()
#TestNCSPermutations( n, "Androgu", 0.8 )
#n.clear_caches()
#TestCMIDPermutations( n, "Androgu", 0.8 )
#n.clear_caches()
print
# for j in range(1, 10):
# n.set_level( j )
# print "level", j,
# print "\t -->", n.ncd("F1M2M2M4F1", "F2M3M3M1F2"),
# print "\t -->", n.ncd("FMMMF", "MMFF"),
# print "\t -->", n.ncd("FMMMF", "FMMMF")
# print "\t bench -->", benchmark(n, "androgu")
|
lgpl-3.0
|
berkmancenter/mediacloud
|
apps/webapp-api/tests/python/auth/register/test_send_user_activation_token.py
|
1
|
1387
|
from mediawords.db import connect_to_db
from webapp.auth.register import add_user, send_user_activation_token
from webapp.auth.user import NewUser
from webapp.test.dummy_emails import TestDoNotSendEmails
class SendUserActivationTokenTestCase(TestDoNotSendEmails):
def test_send_user_activation_token(self):
db = connect_to_db()
email = '[email protected]'
password = 'userlogin123'
activation_url = 'http://activate.com/'
add_user(
db=db,
new_user=NewUser(
email=email,
full_name='Test user login',
has_consented=True,
notes='Test test test',
role_ids=[1],
active=True,
password=password,
password_repeat=password,
activation_url='', # user is active, no need for activation URL
),
)
# Existing user
send_user_activation_token(
db=db,
email=email,
activation_link=activation_url,
)
# Nonexistent user (the call shouldn't fail because we don't want to reveal
# which users are in the system, so we pretend that the email was sent)
send_user_activation_token(
db=db,
email='[email protected]',
activation_link=activation_url,
)
|
agpl-3.0
|
nycz/urd
|
matrix.py
|
1
|
2130
|
def default_item():
return ('', (0,0))
class Matrix():
def __init__(self):
self.data = [[default_item()]]
def __str__(self):
return '\n'.join(repr(row) for row in self.data)
# return '\n'.join('\t|\t'.join(repr(x) for x in row) for row in self.data)
def __contains__(self, key):
if len(key) == 2:
x, y = key
return x in range(len(self.data[0])) and y in range(len(self.data))
else:
return False
def __getitem__(self, key):
x, y = key
return self.data[y][x]
def __setitem__(self, key, value):
x, y = key
if not value:
self.data[y][x] = default_item()
return
text, (w, h) = value
assert isinstance(text, str) and isinstance(w, int) and isinstance(h, int)
self.data[y][x] = value
def clear(self):
self.data = [[default_item()]]
def flip_orientation(self):
self.data = list(map(list, zip(*self.data)))
def count_rows(self):
return len(self.data)
def count_cols(self):
return len(self.data[0])
def add_row(self, pos=-1):
data = [default_item() for _ in range(len(self.data[0]))]
if pos == -1:
self.data.append(data)
else:
self.data.insert(pos, data)
def add_col(self, pos=-1):
for n in range(len(self.data)):
if pos == -1:
self.data[n].append(default_item())
else:
self.data[n].insert(pos, default_item())
def remove_row(self, pos):
del self.data[pos]
def remove_col(self, pos):
for n in range(len(self.data)):
del self.data[n][pos]
def move_row(self, oldpos, newpos):
row = self.data.pop(oldpos)
self.data.insert(newpos, row)
def move_col(self, oldpos, newpos):
for n in range(len(self.data)):
x = self.data[n].pop(oldpos)
self.data[n].insert(newpos, x)
def row(self, pos):
return self.data[pos]
def col(self, pos):
return [x[pos] for x in self.data]
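# A minimal usage sketch of the Matrix API: every cell holds a
# (text, (width, height)) tuple and the grid always stays rectangular.
if __name__ == '__main__':
    m = Matrix()
    m.add_row()   # grid is now 2 rows x 1 column
    m.add_col()   # grid is now 2 rows x 2 columns
    m[(1, 1)] = ('hello', (5, 1))
    assert (1, 1) in m and (2, 2) not in m
    m.flip_orientation()   # transpose; a 2x2 grid keeps (1, 1) in place
    print(m)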
|
mit
|
curtisstpierre/django
|
tests/flatpages_tests/test_middleware.py
|
290
|
8134
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, whether or not one was already created.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_authenticated_flatpage(self):
"A flatpage served by the middleware can require authentication"
response = self.client.get('/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware and should add a slash"
response = self.client.get('/flatpage')
self.assertRedirects(response, '/flatpage/', status_code=301)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here')
self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301)
def test_redirect_fallback_flatpage_root(self):
"A flatpage at / should not cause a redirect loop when APPEND_SLASH is set"
fp = FlatPage.objects.create(
url="/",
title="Root",
content="Root",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Root</p>")
|
bsd-3-clause
|
nikste/tensorflow
|
tensorflow/contrib/testing/python/framework/fake_summary_writer.py
|
43
|
5024
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fake summary writer for unit tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.summary.writer import writer
from tensorflow.python.summary.writer import writer_cache
# TODO(ptucker): Replace with mock framework.
class FakeSummaryWriter(object):
"""Fake summary writer."""
_replaced_summary_writer = None
@classmethod
def install(cls):
if cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter already installed.')
cls._replaced_summary_writer = writer.FileWriter
writer.FileWriter = FakeSummaryWriter
writer_cache.FileWriter = FakeSummaryWriter
@classmethod
def uninstall(cls):
if not cls._replaced_summary_writer:
raise ValueError('FakeSummaryWriter not installed.')
writer.FileWriter = cls._replaced_summary_writer
writer_cache.FileWriter = cls._replaced_summary_writer
cls._replaced_summary_writer = None
def __init__(self, logdir, graph=None):
self._logdir = logdir
self._graph = graph
self._summaries = {}
self._added_graphs = []
self._added_meta_graphs = []
self._added_session_logs = []
@property
def summaries(self):
return self._summaries
def assert_summaries(self,
test_case,
expected_logdir=None,
expected_graph=None,
expected_summaries=None,
expected_added_graphs=None,
expected_added_meta_graphs=None,
expected_session_logs=None):
"""Assert expected items have been added to summary writer."""
if expected_logdir is not None:
test_case.assertEqual(expected_logdir, self._logdir)
if expected_graph is not None:
test_case.assertTrue(expected_graph is self._graph)
expected_summaries = expected_summaries or {}
for step in expected_summaries:
test_case.assertTrue(
step in self._summaries,
msg='Missing step %s from %s.' % (step, self._summaries.keys()))
actual_simple_values = {}
for step_summary in self._summaries[step]:
for v in step_summary.value:
# Ignore global_step/sec since it's written by Supervisor in a
# separate thread, so it's non-deterministic how many get written.
if 'global_step/sec' != v.tag:
actual_simple_values[v.tag] = v.simple_value
test_case.assertEqual(expected_summaries[step], actual_simple_values)
if expected_added_graphs is not None:
test_case.assertEqual(expected_added_graphs, self._added_graphs)
if expected_added_meta_graphs is not None:
test_case.assertEqual(expected_added_meta_graphs, self._added_meta_graphs)
if expected_session_logs is not None:
test_case.assertEqual(expected_session_logs, self._added_session_logs)
def add_summary(self, summ, current_global_step):
"""Add summary."""
if isinstance(summ, bytes):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summ)
summ = summary_proto
if current_global_step in self._summaries:
step_summaries = self._summaries[current_global_step]
else:
step_summaries = []
self._summaries[current_global_step] = step_summaries
step_summaries.append(summ)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_graph(self, graph, global_step=None, graph_def=None):
"""Add graph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
if graph_def is not None:
raise ValueError('Unexpected graph_def %s.' % graph_def)
self._added_graphs.append(graph)
def add_meta_graph(self, meta_graph_def, global_step=None):
"""Add metagraph."""
if (global_step is not None) and (global_step < 0):
raise ValueError('Invalid global_step %s.' % global_step)
self._added_meta_graphs.append(meta_graph_def)
# NOTE: Ignore global_step since its value is non-deterministic.
def add_session_log(self, session_log, global_step=None):
# pylint: disable=unused-argument
self._added_session_logs.append(session_log)
def flush(self):
pass
def reopen(self):
pass
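# A sketch of the intended test-time usage (the names in the body are
# placeholders): install the fake around the code under test, then assert on
# what it captured.
#
#   FakeSummaryWriter.install()         # monkey-patches writer.FileWriter
#   try:
#       run_training_code()             # creates a FileWriter internally
#       fake = get_writer_under_test()  # the FakeSummaryWriter it created
#       fake.assert_summaries(test_case, expected_logdir='/tmp/log')
#   finally:
#       FakeSummaryWriter.uninstall()   # restore the real FileWriter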
|
apache-2.0
|
HenryHu/pybbs
|
Board.py
|
1
|
46243
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
from Util import Util
import Config
import struct
import json
import base64
import BCache
import User
import BRead
import BoardManager
import UserManager
import PostEntry
from Error import *
from Log import Log
from cstruct import CStruct
import fcntl
import time
import os
import re
import binascii
from errors import *
import digest
import store
import mmap
import searchquery
import fast_indexer
import Post
DEFAULT_GET_POST_COUNT = 20
BOARD_VOTEFLAG = 0x1
BOARD_NOZAPFLAG = 0x2
BOARD_READONLY = 0x4
BOARD_JUNK = 0x8
BOARD_ANNONY = 0x10
BOARD_OUTFLAG = 0x20 # /* for outgo boards */
BOARD_CLUB_READ = 0x40 # /* club board with restricted read access */
BOARD_CLUB_WRITE = 0x80 # /* club board with restricted write access */
BOARD_CLUB_HIDE = 0x100 # /* hidden club board */
BOARD_ATTACH = 0x200 # /* board that allows attachments */
BOARD_GROUP = 0x400 # /* directory of boards */
BOARD_EMAILPOST = 0x800 # /* posting via email */
BOARD_POSTSTAT = 0x1000 # /* excluded from top-ten statistics */
BOARD_NOREPLY = 0x2000 # /* replies not allowed */
BOARD_ANONYREPLY = 0x4000 # /* cannot reply anonymously */
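# The constants above are bit flags OR-ed into the board header's 'flag'
# field; for example, a read-restricted club board that allows attachments
# would carry flag == BOARD_CLUB_READ | BOARD_ATTACH, and each property is
# queried with a bitwise AND (see Board.CheckFlag below).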
class PostLog(CStruct):
# what the hell! this is board name, not id! why IDLEN+6!
# IDLEN = 12, BOARDNAMELEN = 30!
# anyway, no one uses it...
# change to IDLEN + 8 for padding
parser = struct.Struct('=%dsIii' % (Config.IDLEN + 8))
_fields = [['board', 1], 'groupid', 'date', 'number']
size = parser.size
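# The format string '=%dsIii' reads as: '=' native byte order with standard
# sizes and no padding, a (Config.IDLEN + 8)-byte string for the board name,
# an unsigned int for groupid, and two signed ints for date and number.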
class PostLogNew(CStruct):
parser = struct.Struct('=%ds%dsIii' % (Config.IDLEN + 6, Config.IDLEN + 6))
_fields = [['userid', 1], ['board', 1], 'groupid', 'date', 'number']
size = parser.size
class WriteDirArg:
def __init__(self):
self.filename = None
self.fileptr = None
self.ent = -1
self.fd = None # fd: file object
self.size = -1
self.needclosefd = False
self.needlock = True
def map_dir(self):
if self.fileptr is None:
if self.fd is None:
self.fd = open(self.filename, "r+b")
self.needclosefd = True
(self.fileptr, self.size) = Util.Mmap(
self.fd, mmap.PROT_READ | mmap.PROT_WRITE, mmap.MAP_SHARED)
if self.fileptr is None:
if self.needclosefd:
self.fd.close()
return False
return True
def free(self):
if self.needclosefd and self.fd is not None:
self.fd.close()
if self.fileptr is not None:
self.fileptr.close()
self.fileptr = None
class Board:
def __init__(self, bh, bs, idx):
self.header = bh
self.status = bs
self.name = bh.filename
self.index = idx
self.digest = digest.Digest(self, "0Announce/groups/%s" % bh.ann_path)
@staticmethod
def GET(svc, session, params, action):
if session is None:
raise Unauthorized('login first')
if not session.CheckScope('bbs'):
raise NoPerm("out of scope")
bo = None
if 'name' in params:
name = params['name']
bo = BoardManager.BoardManager.GetBoard(name)
if bo is None:
raise NotFound("board not found")
if (not bo.CheckReadPerm(session.GetUser())):
raise NoPerm("permission denied")
if action == 'list':
BoardManager.BoardManager.ListBoards(svc, session, params)
return
if bo is None:
raise WrongArgs("lack of board name")
if action == 'post_list':
bo.GetPostList(svc, session, params)
elif action == 'note' or action == 'secnote':
result = {'content': bo.GetNote((action == 'secnote'))}
svc.writedata(json.dumps(result))
elif action == 'thread_list':
bo.GetThreadList(svc, session, params)
else:
raise WrongArgs("unknown action")
@staticmethod
def POST(svc, session, params, action):
if session is None:
raise Unauthorized('login first')
if not session.CheckScope('bbs'):
raise NoPerm("out of scope")
bo = None
if 'name' in params:
name = params['name']
bo = BoardManager.BoardManager.GetBoard(name)
if bo is None:
raise NotFound("board %s not found" % name)
if (not bo.CheckReadPerm(session.GetUser())):
raise NoPerm("permission denied")
if (action == 'clear_unread'):
if (bo is None):
Board.ClearUnreadAll(session.GetUser())
else:
to = svc.get_int(params, 'to', 0)
bo.ClearUnread(session.GetUser(), to)
result = {"result": "ok"}
svc.writedata(json.dumps(result))
else:
raise WrongArgs("unknown action")
def GetBoardPath(self, filename=""):
return Config.BBS_ROOT + 'boards/%s/%s' % (self.name, filename)
def GetDirPath(self, mode='normal'):
if (mode == 'normal'):
return self.GetBoardPath() + '.DIR'
if (mode == 'digest'):
return self.GetBoardPath() + '.DIGEST'
if (mode == 'mark'):
return self.GetBoardPath() + '.MARK'
if (mode == 'deleted'):
return self.GetBoardPath() + '.DELETED'
if (mode == 'junk'):
return self.GetBoardPath() + '.JUNK'
if (mode == 'sticky'):
return self.GetBoardPath() + '.DINGDIR'
if (mode == 'thread'):
return self.GetBoardPath() + '.THREAD'
if (mode == 'origin'):
return self.GetBoardPath() + '.ORIGIN'
@staticmethod
def IsSortedMode(mode):
return (mode == 'normal' or mode == 'thread' or mode == 'mark' or
mode == 'origin' or mode == 'author' or mode == 'title' or
mode == 'superfilter')
def PostCount(self, mode='normal'):
dir_path = self.GetDirPath(mode)
try:
st = os.stat(dir_path)
return st.st_size / PostEntry.PostEntry.size
except:
return 0
def GetPostList(self, svc, session, params):
""" handle board/post_list
List posts in this board in mode 'mode'.
Start listing from post #'start', till post #'end',
return at most 'count' posts.
"""
mode = Util.GetString(params, 'mode', 'normal')
start = Util.GetInt(params, 'start')
end = Util.GetInt(params, 'end')
count = Util.GetInt(params, 'count')
allow_empty = not start and not end
if (mode == 'normal'):
total = self.status.total
else:
total = self.PostCount(mode)
start, end = Util.CheckRange(start, end, count, DEFAULT_GET_POST_COUNT, total)
if ((start <= end) and (start >= 1) and (end <= total)):
bread = BRead.BReadMgr.LoadBRead(session.GetUser().name)
if (bread is not None):
bread.Load(self.name)
if (mode == 'normal' or mode == 'digest' or mode == 'mark' or
mode == 'sticky' or mode == 'thread' or mode == 'origin'):
dirf = open(self.GetDirPath(mode), 'rb')
post = {}
first = True
result = '[\n'
for i in range(start - 1, end):
pe = self.GetPostEntry(i, mode, dirf)
if (pe is None):
continue
if (not first):
result += ',\n'
first = False
post = pe.GetInfoExtended(session.GetUser(), self, 'post')
post['id'] = i + 1
read = True
if (bread is not None):
read = not bread.QueryUnread(pe.id, self.name)
post['read'] = read
# post['filename'] = pe.filename
result += json.dumps(post, encoding='utf-8')
result += '\n]'
svc.writedata(result)
dirf.close()
else:
if allow_empty:
svc.writedata('[]')
else:
raise OutOfRange('out of range')
return
def GetPostEntry(self, postid, mode='normal', fd=None):
pe = None
if (postid < 0):
return None
try:
if (fd is None):
dirf = open(self.GetDirPath(mode), 'rb')
dirf.seek(postid * PostEntry.PostEntry.size)
pe = PostEntry.PostEntry(dirf.read(PostEntry.PostEntry.size))
dirf.close()
else:
fd.seek(postid * PostEntry.PostEntry.size)
pe = PostEntry.PostEntry(fd.read(PostEntry.PostEntry.size))
return pe
except Exception:
return None
def GetPost(self, svc, session, params, id, start, count):
mode = Util.GetString(params, 'mode', 'normal')
if (mode == 'junk' or mode == 'deleted'):
raise NoPerm("invalid mode!")
if ((id >= 1) and (id <= self.status.total)):
pe = self.GetPostEntry(id - 1, mode)
postpath = self.GetBoardPath() + pe.filename
post = pe.GetInfo('post')
post['id'] = id
postinfo = Post.Post(postpath, pe)
post = dict(post.items() + postinfo.GetInfo(start, count).items())
if (post['picattach'] or post['otherattach']):
post['attachlink'] = Post.Post.GetAttachLink(session, self, pe)
svc.writedata(json.dumps(post))
bread = BRead.BReadMgr.LoadBRead(session.GetUser().name)
bread.Load(self.name)
bread.MarkRead(pe.id, self.name)
else:
raise OutOfRange("invalid post id")
return
def GetNextPostReq(self, svc, session, params, id):
direction = Util.GetString(params, 'direction', 'forward')
bfwd = True
if (direction == 'backward'):
bfwd = False
# idonly / compact / detailed
mode = svc.get_str(params, 'mode', 'idonly')
content_len = svc.get_int(params, 'max_lines', 25)
last_one = bool(svc.get_int(params, 'last_one', 0))
only_new = bool(svc.get_int(params, 'only_new', 0))
(next_id, next_xid) = self.GetNextPost(id, bfwd, last_one, only_new,
session.GetUser())
if next_id < 1:
raise ServerError("fail to get next post")
else:
if mode == 'idonly':
nextinfo = {}
nextinfo['nextid'] = next_id
nextinfo['nextxid'] = next_xid
svc.writedata(json.dumps(nextinfo))
else:
post_info = self.ObtainPost(session, next_id, next_xid,
mode, content_len)
retry = 0
while post_info is None and retry < 5:
(next_id, next_xid) = self.GetNextPost(id, bfwd, last_one,
only_new, session.GetUser())
if next_id < 1:
raise ServerError("fail to get next post")
post_info = self.ObtainPost(session, next_id, next_xid,
mode, content_len)
retry += 1
if post_info is None:
raise ServerError("fail to get next post, retry exhausted")
svc.writedata(json.dumps(post_info))
def GetNextPost(self, id, forward, last_one, only_new, user):
if ((id >= 1) and (id <= self.status.total)):
last_post = -1
last_xid = -1
dirf = open(self.GetDirPath("normal"), 'rb')
if (dirf is None):
raise ServerError("fail to load post")
pe = self.GetPostEntry(id - 1, "normal", dirf)
if (forward):
i = id + 1
else:
i = id - 1
if (only_new):
bread = BRead.BReadMgr.LoadBRead(user.name)
while ((i >= 1) and (i <= self.status.total)):
pxe = self.GetPostEntry(i - 1, "normal", dirf)
if (pxe.groupid == pe.groupid):
if ((only_new and bread.QueryUnread(pxe.id, self.name)) or
(not only_new)):
if (not last_one):
dirf.close()
return (i, pxe.id)
else:
last_post = i
last_xid = pxe.id
if (pxe.groupid == pxe.id): # original post
break
if (forward):
i = i + 1
else:
i = i - 1
dirf.close()
if (last_one):
if (last_post != -1):
return (last_post, last_xid)
else:
raise NotFound("post not found")
else:
raise NotFound("post not found")
else:
raise OutOfRange("invalid post id")
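# Note: GetNextPost() scans the .DIR index linearly from 'id' and matches
# posts by groupid (every post in a thread shares the root post's id as its
# groupid). Walking backwards it stops at the thread root, since
# groupid == id holds only for the original post, and 'only_new' further
# filters against the user's BRead unread records.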
def GetAttachmentReq(self, svc, session, params, id):
mode = Util.GetString(params, 'mode', 'normal')
offset = Util.GetInt(params, 'offset')
if (offset <= 0):
raise WrongArgs("invalid or lacking offset")
if ((id >= 1) and (id <= self.status.total)):
pe = self.GetPostEntry(id - 1, mode)
attach = Post.Post.ReadAttachment(self.GetBoardPath() + pe.filename, offset)
attach = {'name': attach[0], 'content': base64.b64encode(attach[1])}
svc.writedata(json.dumps(attach))
else:
raise OutOfRange("invalid post id")
def GetInfo(self):
""" Get information of this board. """
rboard = {}
rboard['name'] = self.name
title = self.GetTitle()
result = re.match('([0-9 ]*)\[([^]]*)\] *([^ ]*) *(.*)', title)
rboard['major'] = result.group(1)
rboard['minor'] = result.group(2)
rboard['outpost'] = result.group(3)
rboard['desc'] = result.group(4)
rboard['id'] = self.index
rboard['BM'] = self.GetBM()
rboard['total'] = self.GetTotal()
rboard['currentusers'] = self.GetCurrentUsers()
rboard['anony_post'] = int(self.CanAnonyPost())
rboard['anony_reply'] = int(self.CanAnonyReply())
rboard['group'] = self.GetGroup()
if self.IsDir():
rboard['isdir'] = self.IsDir()
rboard['child_count'] = self.GetChildCount()
return rboard
def GetInfoWithUser(self, user):
""" Get information of this board, including user-specific parts. """
rboard = self.GetInfo()
rboard['read'] = not self.GetUnread(user)
return rboard
def GetInfoWithUserJSON(self, user):
""" Get information in JSON form, including user-specific parts. """
return json.dumps(self.GetInfoWithUser(user))
def GetInfoJSON(self):
""" Get information in JSON form. """
return json.dumps(self.GetInfo())
def CheckReadPerm(self, user):
""" Check if user 'user' can read this board. """
if (self.header is None):
return False
level = self.header.level
if ((level & User.PERM_POSTMASK != 0) or (user.HasPerm(level)) or (level & User.PERM_NOZAP != 0)):
if (self.CheckFlag(BOARD_CLUB_READ)):
if (user.HasPerm(User.PERM_OBOARDS) and user.HasPerm(User.PERM_SYSOP)):
return True
if (self.header.clubnum <= 0 or self.header.clubnum >= Config.MAXCLUB):
return False
if (user.CanReadClub(self.header.clubnum)):
return True
return False
else:
return True
return False
def CheckFlag(self, flag):
""" Check whether this board has flag 'flag'. """
if (self.header.flag & flag != 0):
return True
return False
def CheckPostPerm(self, user):
""" Check if user 'user' can post on this board. """
if (self.header is None):
return False
if (self.CheckFlag(BOARD_GROUP)):
return False
if (not user.HasPerm(User.PERM_POST)):
if (user.name == 'guest'):
return False
if (self.name == "BBShelp"):
return True # not exist here
if (not user.HasPerm(User.PERM_LOGINOK)):
return False
if (self.name == "Complain"):
return True # not exist here
if (self.name == "sysop"):
return True
if (self.name == "Arbitration"):
return True # not exist here
return False
if (self.header.level == 0 or user.HasPerm(self.header.level & ~User.PERM_NOZAP & ~User.PERM_POSTMASK)):
if (self.CheckFlag(BOARD_CLUB_WRITE)):
if (self.header.clubnum <= 0 or self.header.clubnum >= Config.MAXCLUB):
return False
return user.CanPostClub(self.header.clubnum)
else:
return True
else:
return False
def CheckSeePerm(self, user):
""" Check if user 'user' can see this board. """
if (self.header is None):
return False
if (user is None):
if (self.header.title_level != 0):
return False
else:
if (not user.HasPerm(User.PERM_OBOARDS) and
self.header.title_level != 0 and
self.header.title_level != user.GetTitle()):
return False
level = self.header.level
if (level & User.PERM_POSTMASK != 0 or
(user is None and level == 0) or
(user is not None and user.HasPerm(level)) or
(level & User.PERM_NOZAP != 0)):
if (self.CheckFlag(BOARD_CLUB_HIDE)):
if (user is None):
return False
if (user.HasPerm(User.PERM_OBOARDS)):
return True
return self.CheckReadPerm(user)
return True
return False
def GetTitle(self):
""" Get this board's title. """
return Util.gbkDec(self.header.title)
def GetBM(self):
""" Get this board's list of BMs. """
return self.header.BM
def GetTotal(self):
return self.status.total
def GetCurrentUsers(self):
return self.status.currentusers
def ModCurrentUsers(self, mod):
self.status.currentusers += mod
def GetLastPostId(self):
return self.status.lastpost
def LoadBReadFor(self, user):
bread = BRead.BReadMgr.LoadBRead(user.name)
if (bread is None):
return None
succ = bread.Load(self.name)
if (not succ):
return None
return bread
def GetUnread(self, user):
bread = self.LoadBReadFor(user)
if (bread is None):
return True
return bread.QueryUnread(self.GetLastPostId(), self.name)
def ClearUnread(self, user, to=0):
bread = self.LoadBReadFor(user)
if (bread is None):
return True
if (to == 0):
return bread.Clear(self.name)
else:
return bread.ClearTo(to, self.name)
@staticmethod
def ClearUnreadAll(user):
for i in xrange(BCache.BCache.GetBoardCount()):
board = BoardManager.BoardManager.GetBoardByIndex(i)
if (board is not None):
board.ClearUnread(user)
def CheckReadonly(self):
return self.CheckFlag(BOARD_READONLY)
def CheckNoReply(self):
return self.CheckFlag(BOARD_NOREPLY)
def CanAnonyPost(self):
return self.CheckFlag(BOARD_ANNONY)
def CanAnonyReply(self):
return self.CheckFlag(BOARD_ANONYREPLY)
def CanPostAttach(self):
return self.CheckFlag(BOARD_ATTACH)
def IsJunkBoard(self):
return self.CheckFlag(BOARD_JUNK)
def IsSysmailBoard(self):
return self.name == Config.SYSMAIL_BOARD
def DontStat(self):
return self.CheckFlag(BOARD_POSTSTAT)
def PreparePostArticle(self, user, refile, anony, attach):
detail = {}
if (refile is not None):
if (self.CheckNoReply()):
raise NoPerm("can't reply in this board")
if (refile.CannotReply()):
raise NoPerm("can't reply this post")
if (self.CheckReadonly()):
Log.debug("PostArticle: fail: readonly")
raise NoPerm("board is readonly")
if (not self.CheckPostPerm(user)):
Log.debug("PostArticle: fail: %s can't post on %s" % (user.name, self.name))
raise NoPerm("no permission to post")
if (self.DeniedUser(user)):
if (not user.HasPerm(User.PERM_SYSOP)):
Log.debug("PostArticle: fail: %s denied on %s" % (user.name, self.name))
raise NoPerm("user denied")
if anony:
if not self.MayAnonyPost(user, refile):
detail['anonymous'] = 1
if attach:
if not self.CanPostAttach():
detail['attachment'] = 1
return detail
def MayAnonyPost(self, user, refile):
mycrc = (~binascii.crc32(user.name, 0xffffffff)) & 0xffffffff
may_anony = False
if (refile is None): # not in reply mode
if (self.CanAnonyPost()):
may_anony = True
else:
if (self.CanAnonyPost() and mycrc == refile.rootcrc):
may_anony = True
else:
if (self.CanAnonyReply()):
may_anony = True
return may_anony
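# Note: only a CRC32 of the root poster's name (rootcrc) is kept in the post
# index, so the starter of an anonymous thread can later be recognized (by
# CRC match) and allowed to reply anonymously without the real name ever
# being stored alongside the post.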
def PostArticle(self, user, title, content, refile, signature_id, anony, mailback,
session, attach, ignoreperm=False):
# check permission
if not ignoreperm:
self.PreparePostArticle(user, refile, anony, attach)
# filter title: 'Re: ' and '\ESC'
# title = title.replace('\033', ' ')
title = re.sub('[\x00-\x19]', ' ', title)
if (refile is None):
while (title[:4] == "Re: "):
title = title[4:]
if anony:
if not self.MayAnonyPost(user, refile):
anony = False
if attach:
if not self.CanPostAttach():
attach = None
post_file = PostEntry.PostEntry()
# Log.debug("PostArticle title: %s anony: %r" % (title, anony))
post_file.filename = self.GetPostFilename(False)
if (not anony):
post_file.owner = user.name
else:
post_file.owner = self.name
post_file.realowner = user.name
if (mailback):
post_file.SetMailBack(True)
content_encoded = Util.gbkEnc(content)
try:
with open(self.GetBoardPath() + post_file.filename, "ab") as f:
Post.Post.WriteHeader(f, user, False, self, title, anony, 0, session)
f.write(content_encoded)
if (not anony):
Post.Post.AddSig(f, user, signature_id)
except IOError:
Log.error("PostArticle: write post failed!")
os.unlink(self.GetBoardPath() + post_file.filename)
raise ServerError("fail to write post file")
post_file.eff_size = len(content_encoded)
if (refile is not None):
post_file.rootcrc = refile.rootcrc
if (refile.IsRootPostAnonymous()):
post_file.SetRootPostAnonymous(True)
else:
mycrc = (~binascii.crc32(user.name, 0xffffffff)) & 0xffffffff
post_file.rootcrc = mycrc
if (anony):
post_file.SetRootPostAnonymous(True)
if (signature_id == 0):
has_sig = False
else:
has_sig = True
Post.Post.AddLogInfo(self.GetBoardPath(post_file.filename), user, session, anony, has_sig)
post_file.title = Util.gbkEnc(title)
# TODO: outpost ('SS')
post_file.innflag = 'LL'
if attach:
try:
post_file.attachment = len(attach)
except:
post_file.attachment = 0
else:
post_file.attachment = 0
self.AfterPost(user, post_file, refile, anony)
if attach:
try:
for att in attach:
filename = att['name']
tmpfile = att['store_id']
if (not store.Store.verify_id(tmpfile)):
continue
tmpfile = store.Store.path_from_id(tmpfile)
Post.Post.AddAttach(self.GetBoardPath(post_file.filename),
filename, tmpfile)
except:
pass
if (not self.IsJunkBoard()):
user.AddNumPosts()
return True
def AfterPost(self, user, post_file, re_file, anony):
bdir = self.GetDirPath('normal')
try:
with open(bdir, "ab") as bdirf:
fcntl.flock(bdirf, fcntl.LOCK_EX)
try:
nowid = self.GetNextId()
if (nowid < 0):
raise IOError()
post_file.id = nowid
if (re_file is None):
post_file.groupid = nowid
post_file.reid = nowid
else:
post_file.groupid = re_file.groupid
post_file.reid = re_file.id
post_file.posttime = 0 # not used
# no seek: append mode
bdirf.write(post_file.pack())
finally:
fcntl.flock(bdirf, fcntl.LOCK_UN)
except IOError as e:
post_fname = self.GetBoardPath() + post_file.filename
os.unlink(post_fname)
raise e
self.UpdateLastPost()
bread = BRead.BReadMgr.LoadBRead(user.name)
if (bread is not None):
bread.Load(self.name)
bread.MarkRead(post_file.id, self.name)
if (re_file is not None):
if (re_file.NeedMailBack()):
# mail back, not implemented
pass
if (user is not None and anony):
# ANONYDIR: not used, ignore it
pass
if (user is not None and not anony):
self.WritePosts(user, post_file.groupid)
if (post_file.id == post_file.groupid):
# self.RegenSpecial('origin': later)
self.SetUpdate('origin', True)
self.SetUpdate('title', True)
if (post_file.IsMarked()):
self.SetUpdate('mask', True)
# log: later
return
def UpdateLastPost(self):
(post_cnt, last_post) = self.GetLastPost()
# self.status.unpack()
self.status.lastpost = last_post
self.status.total = post_cnt
# self.status.pack()
return True
def GetLastPost(self):
bdir = self.GetDirPath('normal')
try:
with open(bdir, "rb") as f:
f.seek(0, 2)
size = f.tell()
                post_cnt = size // PostEntry.PostEntry.size
if (post_cnt <= 0):
last_post = 0
post_cnt = 0
else:
f.seek((post_cnt - 1) * PostEntry.PostEntry.size, 0)
post_file = PostEntry.PostEntry(f.read(PostEntry.PostEntry.size))
last_post = post_file.id
return (post_cnt, last_post)
except IOError:
return (0, 0)
def IsNormalBoard(self):
if (self.name == Config.DEFAULTBOARD):
return True
bh = self.header
ret = True
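        # Walk up the board-group chain: a board counts as normal only if
        # neither it nor any ancestor group is sysop-only, club-restricted,
        # or gated by a title level.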
while (ret):
ret = (not (bh.level & User.PERM_SYSOP) and
not (bh.flag & BOARD_CLUB_HIDE) and
not (bh.flag & BOARD_CLUB_READ))
if (bh.title_level != 0):
ret = False
if (not ret or (bh.group == 0)):
break
bh = BCache.BoardHeader(bh.group)
return ret
def WritePosts(self, user, groupid):
if (self.name != Config.BLESS_BOARD and
(self.DontStat() or (not self.IsNormalBoard()))):
return 0
now = time.time()
postlog = PostLog()
postlog.board = self.name
postlog.groupid = groupid
postlog.date = now
postlog.number = 1
postlog_new = PostLogNew()
postlog_new.board = self.name
postlog_new.groupid = groupid
postlog_new.date = now
postlog_new.number = 1
postlog_new.userid = user.name
xpostfile = "%s/tmp/Xpost/%s" % (Config.BBS_ROOT, user.name)
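        # The per-user Xpost file records which threads were already counted;
        # scan it so reposting into the same thread is not double-counted in
        # the statistics files appended below.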
log = True
try:
with open(xpostfile, "rb") as fp:
while (True):
pl_data = fp.read(PostLog.size)
if (len(pl_data) < PostLog.size):
break
pl = PostLog(pl_data)
if (pl.groupid == groupid and pl.board == self.name):
log = False
break
except IOError:
pass
if (log):
Util.AppendRecord(xpostfile, postlog.pack())
Util.AppendRecord(Config.BBS_ROOT + "/.newpost", postlog.pack())
Util.AppendRecord(Config.BBS_ROOT + "/.newpost_new", postlog_new.pack())
return 0
def GetUpdate(self, item):
myid = BCache.BCache.GetBoardNum(self.name)
if (myid == 0):
return False
status = BCache.BoardStatus(myid)
value = 0
if (item == 'origin'):
value = status.updateorigin
elif (item == 'mark'):
value = status.updatemark
elif (item == 'title'):
value = status.updatetitle
if (value == 0):
return False
return True
def SetUpdate(self, item, need_update):
myid = BCache.BCache.GetBoardNum(self.name)
if (myid == 0):
return False
value = 0
if (need_update):
value = 1
status = BCache.BoardStatus(myid)
if (item == 'origin'):
status.updateorigin = value
elif (item == 'mark'):
status.updatemark = value
elif (item == 'title'):
status.updatetitle = value
# status.pack()
return True
def GetPostFilename(self, use_subdir):
return Post.Post.GetPostFilename(self.GetBoardPath(), use_subdir)
def DeniedUser(self, user):
if (Util.SeekInFile(self.GetBoardPath() + "deny_users", user.name)):
return True
if (Util.SeekInFile(self.GetBoardPath() + "anony_deny_users", user.name)):
return True
return False
def GetNextId(self):
return BCache.BCache.GetNextID(self.name)
def FindPost(self, id, xid, mode):
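        # 'id' is a 1-based position hint, 'xid' the stable post identifier.
        # Probe a handful of entries around the hint first; if that misses,
        # binary-search the index file, which is ordered by post id.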
if id > 0:
post = self.GetPostEntry(id - 1, mode)
for i in xrange(5):
if post is None:
break
if post.id == xid:
return (post, id)
if post.id < xid:
id += 1
else:
id -= 1
post = self.GetPostEntry(id - 1, mode)
count = self.PostCount(mode)
start = 1
end = count
while end >= start:
            mid = (start + end) // 2
post = self.GetPostEntry(mid - 1, mode)
if post.id < xid:
start = mid + 1
elif post.id > xid:
end = mid - 1
else:
return (post, mid)
return (None, 0)
def QuotePost(self, svc, post_id, xid, include_mode, index_mode, include_data):
if (index_mode == 'junk' or index_mode == 'deleted'):
raise NoPerm("invalid index_mode!")
(post, _) = self.FindPost(post_id, xid, index_mode)
if (post is None):
raise NotFound("referred post not found")
quote = Post.Post.DoQuote(include_mode, self.GetBoardPath(post.filename), True, include_data)
orig_title = ''
if (post.title[:3] == "Re:"):
# Re: <title>
orig_title = post.title[4:]
elif (post.title[:3] == u"├ ".encode('gbk')):
orig_title = post.title[3:]
elif (post.title[:3] == u"└ ".encode('gbk')):
orig_title = post.title[3:]
else:
orig_title = post.title
if include_mode == 'C':
quote_title = orig_title
else:
quote_title = "Re: " + orig_title
quote_obj = {}
quote_obj['title'] = Util.gbkDec(quote_title)
quote_obj['content'] = Util.gbkDec(quote)
svc.writedata(json.dumps(quote_obj))
def GetNote(self, secret=False):
if (not secret):
notes_path = "%s/vote/%s/notes" % (Config.BBS_ROOT, self.name)
else:
notes_path = "%s/vote/%s/secnotes" % (Config.BBS_ROOT, self.name)
try:
with open(notes_path, 'rb') as f:
return Util.gbkDec(f.read())
except:
raise NotFound('board note does not exist')
@staticmethod
def IsBM(user, bmstr):
if (user.IsSuperBM() or user.IsSysop()):
return True
if (not user.IsBM()):
return False
return Board.IsBMs(user.name, bmstr)
@staticmethod
def IsBMs(userid, bmstr):
for item in re.split('[,: ;|&()\0\n]', bmstr):
if (userid == item):
return True
return False
def IsMyBM(self, user):
bmstr = self.GetBM()
return Board.IsBM(user, bmstr)
def IsDir(self):
return (self.header.flag & BOARD_GROUP) != 0
def GetChildCount(self):
if not self.IsDir():
return 0
return self.header.adv_club # it's a union, in fact
def GetGroup(self):
return self.header.group
def DelPost(self, user, post_id, post_xid, mode='normal'):
# from del_post()
if post_id > self.PostCount(mode):
raise WrongArgs("out of range")
if self.name == "syssecurity" or self.name == "junk" or self.name == "deleted":
raise WrongArgs("invalid board: %s" % self.name)
if mode == "junk" or mode == "deleted":
raise WrongArgs("invalid mode: %s" % mode)
(post_entry, new_post_id) = self.FindPost(post_id, post_xid, mode)
if post_entry is None:
raise NotFound("post not found")
if not post_entry.CanBeDeleted(user, self):
raise NoPerm("permission denied")
owned = user.IsOwner(post_entry)
arg = WriteDirArg()
arg.filename = self.GetDirPath(mode)
if mode == 'normal' or mode == 'digest':
arg.ent = new_post_id
# from do_del_post()
succ = self.PrepareWriteDir(arg, mode, post_entry)
if not succ:
raise ServerError("fail to prepare directory write")
self.DeleteEntry(arg.fileptr, arg.ent, arg.size, arg.fd)
Util.FUnlock(arg.fd)
self.SetUpdate('title', True)
self.CancelPost(user, post_entry, owned, True)
self.UpdateLastPost()
if post_entry.IsMarked():
self.SetUpdate('mark', True)
if (mode == 'normal' and
not (post_entry.IsMarked() and
post_entry.CannotReply() and
post_entry.IsForwarded()) and
not self.IsJunkBoard()):
if owned:
user.DecNumPosts()
elif "." not in post_entry.owner and Config.BMDEL_DECREASE:
user = UserManager.UserManager.LoadUser(post_entry.owner)
if user is not None and not self.IsSysmailBoard():
user.DecNumPosts()
arg.free()
def PrepareWriteDir(self, arg, mode='normal', post_entry=None):
if not arg.map_dir():
return False
if arg.needlock:
Util.FLock(arg.fd)
if post_entry:
(newpost, newid) = self.FindPost(arg.ent, post_entry.id, mode)
if newpost is None:
Util.FUnlock(arg.fd)
return False
arg.ent = newid
return True
def DeleteEntry(self, fileptr, entry, size, fd):
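        # Drop the entry-th fixed-size record from the mmap'ed index by
        # shifting every later record down one slot and truncating the file.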
dst_pos = PostEntry.PostEntry.size * (entry - 1)
src_pos = PostEntry.PostEntry.size * entry
new_size = size - PostEntry.PostEntry.size
fileptr[dst_pos:new_size] = fileptr[src_pos:size]
os.ftruncate(fd.fileno(), new_size)
def UpdatePostEntry(self, post_entry, post_id=0, mode='normal'):
arg = WriteDirArg()
arg.filename = self.GetDirPath(mode)
succ = self.PrepareWriteDir(arg, mode, post_entry)
if not succ:
return False
try:
pos = PostEntry.PostEntry.size * (arg.ent - 1)
arg.fileptr[pos:pos+PostEntry.PostEntry.size] = post_entry.pack()
finally:
arg.free()
return True
def CancelPost(self, user, entry, owned, append):
# TODO: delete mail
# rename post file
new_filename = entry.filename
rep_char = 'J' if owned else 'D'
if new_filename[1] == '/':
new_filename = entry.filename[0] + rep_char + entry.filename[2:]
else:
new_filename = rep_char + entry.filename[1:]
oldpath = self.GetBoardPath(entry.filename)
newpath = self.GetBoardPath(new_filename)
os.rename(oldpath, newpath)
entry.filename = new_filename
new_title = "%-32.32s - %s" % (entry.title, user.name)
if not append:
entry.title = new_title
entry.UpdateDeleteTime()
# TODO: flush back entry changes
else:
postfile = PostEntry.PostEntry()
postfile.filename = new_filename
postfile.owner = entry.owner
postfile.id = entry.id
postfile.groupid = entry.groupid
postfile.reid = entry.reid
postfile.attachment = entry.attachment
postfile.title = new_title
postfile.UpdateDeleteTime()
new_dir = self.GetDirPath('junk' if owned else 'deleted')
Util.AppendRecord(new_dir, postfile.pack())
return True
def EditPost(self, session, post_xid, post_id=0, new_title=None,
content=None, mode='normal', attach_to_remove=set(),
add_attach_list=[]):
(post_entry, post_id) = self.FindPost(post_id, post_xid, mode)
if post_entry is None:
raise NotFound("post not found")
if (self.name == "syssecurity" or self.name == "junk" or self.name == "deleted"):
raise WrongArgs("can't edit post in board %s" % self.name)
if mode == "junk" or mode == "deleted":
raise WrongArgs("can't edit post in mode %s" % mode)
if self.CheckReadonly():
raise WrongArgs("board %s is read-only" % self.name)
user = session.GetUser()
if not post_entry.CanBeEdit(user, self):
raise NoPerm("you can't edit this post")
if self.DeniedUser(user):
raise NoPerm("you can't edit on board %s" % self.name)
post_path = self.GetBoardPath(post_entry.filename)
post = Post.Post(post_path, post_entry)
if content is None:
content = post.GetBody()
first_attach_pos = 0
need_update = False
new_post_path = post_path + ".new"
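        # The edit is assembled in a sibling ".new" file and rename()d over
        # the original only after it is fully written; the cleanup in the
        # finally block removes the temporary if anything fails first.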
if new_title is not None and new_title != Util.gbkDec(post_entry.title):
post_entry.title = Util.gbkEnc(new_title)
need_update = True
with open(post_path, "r+b") as postf:
Util.FLock(postf)
try:
attach_list = post.GetAttachList()
newpost = Post.Post(new_post_path, post_entry)
newpost.open()
try:
newpost.EditHeaderFrom(post, new_title)
size_header = newpost.pos()
newpost.EditContent(content, session, post)
content_len = newpost.pos() - size_header
if content_len != post_entry.eff_size:
post_entry.eff_size = content_len
need_update = True
# copy original attachments
orig_attach_id = 0
for attach_entry in attach_list:
if orig_attach_id not in attach_to_remove:
try:
attach_pos = newpost.AppendAttachFrom(post, attach_entry)
if first_attach_pos == 0:
first_attach_pos = attach_pos
except:
pass
orig_attach_id += 1
# add new attachments
for attach_entry in add_attach_list:
filename = attach_entry['name']
tmpfile = attach_entry['store_id']
if (not store.Store.verify_id(tmpfile)):
continue
tmpfile = store.Store.path_from_id(tmpfile)
try:
attach_pos = newpost.AddAttachSelf(filename, tmpfile)
if first_attach_pos == 0:
first_attach_pos = attach_pos
except Exception as e:
Log.warn("fail to add attach: %r" % e)
finally:
newpost.close()
os.rename(new_post_path, post_path)
finally:
try:
os.remove(new_post_path)
except:
pass
Util.FUnlock(postf)
if first_attach_pos != post_entry.attachment:
post_entry.attachment = first_attach_pos
need_update = True
if need_update:
# fail to update post info is not that important
if not self.UpdatePostEntry(post_entry, post_id, mode):
Log.warn("fail to update post entry!")
def SearchPost(self, user, start_id, forward, query_expr, count=1):
if count > Config.SEARCH_COUNT_LIMIT:
count = Config.SEARCH_COUNT_LIMIT
result = []
curr_id = start_id
result_count = 0
query = searchquery.SearchQuery(query_expr)
while True:
post_entry = self.GetPostEntry(curr_id - 1)
if post_entry is None:
return result
if query.match(self, post_entry):
info = post_entry.GetInfoExtended(user, self)
info['id'] = curr_id
result.append(info)
result_count += 1
if result_count == count:
return result
if forward:
curr_id += 1
else:
curr_id -= 1
return result
def GetThreadList(self, svc, session, param):
""" handle board/thread_list
List posts in the thread 'tid'.
From the 'start'th post in the thread, return 'count' results.
If mode is:
'idonly': only return id and xid
'compact': return post info
'detailed': also return post content, return at most
'max_lines' lines for each post
Return a list of posts. """
start = svc.get_int(param, 'start', 0)
count = svc.get_int(param, 'count', 10)
tid = svc.get_int(param, 'tid')
mode = svc.get_str(param, 'mode', 'idonly')
content_len = svc.get_int(param, 'max_lines', 25)
result = fast_indexer.query_by_tid(svc.server.fast_indexer_state,
self.name, tid, start, count)
ret = []
for (post_id, post_xid) in result:
if mode == 'idonly':
ret.append({'id': post_id, 'xid': post_xid})
else:
post_info = self.ObtainPost(session, post_id, post_xid,
mode, content_len)
if post_info is None:
# deleted after last index?
continue
ret.append(post_info)
svc.writedata(json.dumps({'result': 'ok', 'list': ret}))
def ObtainPost(self, session, post_id, post_xid, mode, content_len):
(post_entry, cur_post_id) = self.FindPost(
post_id, post_xid, 'normal')
if post_entry is None:
return None
post_info = post_entry.GetInfoExtended(session.GetUser(), self)
post_info['id'] = cur_post_id
if mode == 'detailed':
postpath = self.GetBoardPath() + post_entry.filename
postobj = Post.Post(postpath, post_entry)
post_info = dict(post_info.items() + postobj.GetInfo(0, content_len).items())
if (post_info['picattach'] or post_info['otherattach']):
post_info['attachlink'] = Post.Post.GetAttachLink(
session, self, post_entry)
# mark the post as read
# only in detailed mode
bread = BRead.BReadMgr.LoadBRead(session.GetUser().name)
bread.Load(self.name)
bread.MarkRead(post_xid, self.name)
return post_info
|
bsd-2-clause
|
collective/eden
|
static/scripts/tools/jsmin.py
|
513
|
7471
|
#!/usr/bin/python
# This code is original from jsmin by Douglas Crockford, it was translated to
# Python by Baruch Even. The original code had the following copyright and
# license.
#
# /* jsmin.c
# 2007-01-08
#
# Copyright (c) 2002 Douglas Crockford (www.crockford.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# The Software shall be used for Good, not Evil.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# */
from StringIO import StringIO
def jsmin(js):
ins = StringIO(js)
outs = StringIO()
JavascriptMinify().minify(ins, outs)
    result = outs.getvalue()
    if len(result) > 0 and result[0] == '\n':
        result = result[1:]
    return result
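# Illustrative usage (hand-traced example; the expected output assumes the
# default rules implemented by JavascriptMinify below):
#     jsmin("var x = 1; // comment\nvar y = 2;")  ->  'var x=1;var y=2;'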
def isAlphanum(c):
"""return true if the character is a letter, digit, underscore,
dollar sign, or non-ASCII character.
"""
return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') or
            (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\' or (c is not None and ord(c) > 126))
class UnterminatedComment(Exception):
pass
class UnterminatedStringLiteral(Exception):
pass
class UnterminatedRegularExpression(Exception):
pass
class JavascriptMinify(object):
def _outA(self):
self.outstream.write(self.theA)
def _outB(self):
self.outstream.write(self.theB)
def _get(self):
"""return the next character from stdin. Watch out for lookahead. If
the character is a control character, translate it to a space or
linefeed.
"""
c = self.theLookahead
self.theLookahead = None
        if c is None:
c = self.instream.read(1)
if c >= ' ' or c == '\n':
return c
if c == '': # EOF
return '\000'
if c == '\r':
return '\n'
return ' '
def _peek(self):
self.theLookahead = self._get()
return self.theLookahead
def _next(self):
"""get the next character, excluding comments. peek() is used to see
if a '/' is followed by a '/' or '*'.
"""
c = self._get()
if c == '/':
p = self._peek()
if p == '/':
c = self._get()
while c > '\n':
c = self._get()
return c
if p == '*':
c = self._get()
while 1:
c = self._get()
if c == '*':
if self._peek() == '/':
self._get()
return ' '
if c == '\000':
raise UnterminatedComment()
return c
def _action(self, action):
"""do something! What you do is determined by the argument:
1 Output A. Copy B to A. Get the next B.
2 Copy B to A. Get the next B. (Delete A).
3 Get the next B. (Delete B).
action treats a string as a single character. Wow!
action recognizes a regular expression if it is preceded by ( or , or =.
"""
if action <= 1:
self._outA()
if action <= 2:
self.theA = self.theB
if self.theA == "'" or self.theA == '"':
while 1:
self._outA()
self.theA = self._get()
if self.theA == self.theB:
break
if self.theA <= '\n':
raise UnterminatedStringLiteral()
if self.theA == '\\':
self._outA()
self.theA = self._get()
if action <= 3:
self.theB = self._next()
if self.theB == '/' and (self.theA == '(' or self.theA == ',' or
self.theA == '=' or self.theA == ':' or
self.theA == '[' or self.theA == '?' or
self.theA == '!' or self.theA == '&' or
self.theA == '|'):
self._outA()
self._outB()
while 1:
self.theA = self._get()
if self.theA == '/':
break
elif self.theA == '\\':
self._outA()
self.theA = self._get()
elif self.theA <= '\n':
raise UnterminatedRegularExpression()
self._outA()
self.theB = self._next()
def _jsmin(self):
"""Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed.
"""
self.theA = '\n'
self._action(3)
while self.theA != '\000':
if self.theA == ' ':
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
elif self.theA == '\n':
if self.theB in ['{', '[', '(', '+', '-']:
self._action(1)
elif self.theB == ' ':
self._action(3)
else:
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
else:
if self.theB == ' ':
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
elif self.theB == '\n':
if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
self._action(1)
else:
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
else:
self._action(1)
def minify(self, instream, outstream):
self.instream = instream
self.outstream = outstream
self.theA = None
        self.theB = None
self.theLookahead = None
self._jsmin()
self.instream.close()
if __name__ == '__main__':
import sys
jsm = JavascriptMinify()
jsm.minify(sys.stdin, sys.stdout)
|
mit
|
zsjohny/jumpserver
|
apps/settings/forms/ldap.py
|
1
|
1449
|
# coding: utf-8
#
from django import forms
from django.utils.translation import ugettext_lazy as _
from common.fields import FormDictField, FormEncryptCharField
from .base import BaseForm
__all__ = ['LDAPSettingForm']
class LDAPSettingForm(BaseForm):
AUTH_LDAP_SERVER_URI = forms.CharField(
label=_("LDAP server"),
)
AUTH_LDAP_BIND_DN = forms.CharField(
required=False, label=_("Bind DN"),
)
AUTH_LDAP_BIND_PASSWORD = FormEncryptCharField(
label=_("Password"),
widget=forms.PasswordInput, required=False
)
AUTH_LDAP_SEARCH_OU = forms.CharField(
label=_("User OU"),
help_text=_("Use | split User OUs"),
required=False,
)
AUTH_LDAP_SEARCH_FILTER = forms.CharField(
label=_("User search filter"),
help_text=_("Choice may be (cn|uid|sAMAccountName)=%(user)s)")
)
AUTH_LDAP_USER_ATTR_MAP = FormDictField(
label=_("User attr map"),
help_text=_(
"User attr map present how to map LDAP user attr to jumpserver, "
"username,name,email is jumpserver attr"
),
)
# AUTH_LDAP_GROUP_SEARCH_OU = CONFIG.AUTH_LDAP_GROUP_SEARCH_OU
# AUTH_LDAP_GROUP_SEARCH_FILTER = CONFIG.AUTH_LDAP_GROUP_SEARCH_FILTER
# AUTH_LDAP_START_TLS = forms.BooleanField(
# label=_("Use SSL"), required=False
# )
AUTH_LDAP = forms.BooleanField(label=_("Enable LDAP auth"), required=False)
|
gpl-2.0
|
midnightmagic/bitcoin
|
test/functional/rpc_decodescript.py
|
45
|
16652
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test decoding scripts via decodescript RPC command."""
from test_framework.messages import CTransaction, sha256
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, hex_str_to_bytes
from io import BytesIO
class DecodeScriptTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def decodescript_script_sig(self):
signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
push_signature = '48' + signature
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
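        # '48' and '21' are raw push opcodes: push 0x48 (72) signature bytes
        # and 0x21 (33) compressed-pubkey bytes respectively.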
# below are test cases for all of the standard transaction types
# 1) P2PK scriptSig
# the scriptSig of a public key scriptPubKey simply pushes a signature onto the stack
rpc_result = self.nodes[0].decodescript(push_signature)
assert_equal(signature, rpc_result['asm'])
# 2) P2PKH scriptSig
rpc_result = self.nodes[0].decodescript(push_signature + push_public_key)
assert_equal(signature + ' ' + public_key, rpc_result['asm'])
# 3) multisig scriptSig
# this also tests the leading portion of a P2SH multisig scriptSig
# OP_0 <A sig> <B sig>
rpc_result = self.nodes[0].decodescript('00' + push_signature + push_signature)
assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm'])
# 4) P2SH scriptSig
# an empty P2SH redeemScript is valid and makes for a very simple test case.
# thus, such a spending scriptSig would just need to pass the outer redeemScript
# hash test and leave true on the top of the stack.
rpc_result = self.nodes[0].decodescript('5100')
assert_equal('1 0', rpc_result['asm'])
        # 5) null data scriptSig - no such thing because null data scripts cannot be spent.
# thus, no test case for that standard transaction type is here.
def decodescript_script_pub_key(self):
public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2'
push_public_key = '21' + public_key
public_key_hash = '5dd1d3a048119c27b28293056724d9522f26d945'
push_public_key_hash = '14' + public_key_hash
uncompressed_public_key = '04b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb25e01fc8fde47c96c98a4f3a8123e33a38a50cf9025cc8c4494a518f991792bb7'
push_uncompressed_public_key = '41' + uncompressed_public_key
p2wsh_p2pk_script_hash = 'd8590cf8ea0674cf3d49fd7ca249b85ef7485dea62c138468bddeb20cd6519f7'
# below are test cases for all of the standard transaction types
# 1) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_public_key + 'ac')
assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm'])
# P2PK is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 2) P2PKH scriptPubKey
# OP_DUP OP_HASH160 <PubKeyHash> OP_EQUALVERIFY OP_CHECKSIG
rpc_result = self.nodes[0].decodescript('76a9' + push_public_key_hash + '88ac')
assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm'])
# P2PKH is translated to P2WPKH
assert_equal('0 ' + public_key_hash, rpc_result['segwit']['asm'])
# 3) multisig scriptPubKey
# <m> <A pubkey> <B pubkey> <C pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
multisig_script = '52' + push_public_key + push_public_key + push_public_key + '53ae'
rpc_result = self.nodes[0].decodescript(multisig_script)
assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm'])
# multisig in P2WSH
multisig_script_hash = sha256(hex_str_to_bytes(multisig_script)).hex()
assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm'])
# 4) P2SH scriptPubKey
# OP_HASH160 <Hash160(redeemScript)> OP_EQUAL.
# push_public_key_hash here should actually be the hash of a redeem script.
# but this works the same for purposes of this test.
rpc_result = self.nodes[0].decodescript('a9' + push_public_key_hash + '87')
assert_equal('OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm'])
        # P2SH does not work in segwit scripts. decodescript should not return a result for it.
assert 'segwit' not in rpc_result
# 5) null data scriptPubKey
# use a signature look-alike here to make sure that we do not decode random data as a signature.
# this matters if/when signature sighash decoding comes along.
# would want to make sure that no such decoding takes place in this case.
signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001'
# OP_RETURN <data>
rpc_result = self.nodes[0].decodescript('6a' + signature_imposter)
assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm'])
# 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here.
# OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY.
# just imagine that the pub keys used below are different.
# for our purposes here it does not matter that they are the same even though it is unrealistic.
#
# OP_IF
# <receiver-pubkey> OP_CHECKSIGVERIFY
# OP_ELSE
# <lock-until> OP_CHECKLOCKTIMEVERIFY OP_DROP
# OP_ENDIF
# <sender-pubkey> OP_CHECKSIG
#
# lock until block 500,000
cltv_script = '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac'
rpc_result = self.nodes[0].decodescript(cltv_script)
assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm'])
# CLTV script in P2WSH
cltv_script_hash = sha256(hex_str_to_bytes(cltv_script)).hex()
assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm'])
# 7) P2PK scriptPubKey
# <pubkey> OP_CHECKSIG
rpc_result = self.nodes[0].decodescript(push_uncompressed_public_key + 'ac')
assert_equal(uncompressed_public_key + ' OP_CHECKSIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 8) multisig scriptPubKey with an uncompressed pubkey
# <m> <A pubkey> <B pubkey> <n> OP_CHECKMULTISIG
# just imagine that the pub keys used below are different.
# the purpose of this test is to check that a segwit script is not returned for bare multisig scripts
# with an uncompressed pubkey in them.
        rpc_result = self.nodes[0].decodescript('52' + push_public_key + push_uncompressed_public_key + '52ae')
assert_equal('2 ' + public_key + ' ' + uncompressed_public_key + ' 2 OP_CHECKMULTISIG', rpc_result['asm'])
# uncompressed pubkeys are invalid for checksigs in segwit scripts.
# decodescript should not return a P2WPKH equivalent.
assert 'segwit' not in rpc_result
# 9) P2WPKH scriptpubkey
# 0 <PubKeyHash>
rpc_result = self.nodes[0].decodescript('00' + push_public_key_hash)
assert_equal('0 ' + public_key_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
# 10) P2WSH scriptpubkey
# 0 <ScriptHash>
# even though this hash is of a P2PK script which is better used as bare P2WPKH, it should not matter
# for the purpose of this test.
rpc_result = self.nodes[0].decodescript('0020' + p2wsh_p2pk_script_hash)
assert_equal('0 ' + p2wsh_p2pk_script_hash, rpc_result['asm'])
# segwit scripts do not work nested into each other.
# a nested segwit script should not be returned in the results.
assert 'segwit' not in rpc_result
def decoderawtransaction_asm_sighashtype(self):
"""Test decoding scripts via RPC command "decoderawtransaction".
This test is in with the "decodescript" tests because they are testing the same "asm" script decodes.
"""
# this test case uses a random plain vanilla mainnet transaction with a single P2PKH input and output
tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm'])
# this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs.
# it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc
# verify that we have not altered scriptPubKey decoding.
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid'])
assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm'])
assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
txSave = CTransaction()
txSave.deserialize(BytesIO(hex_str_to_bytes(tx)))
# make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type
tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm'])
# verify that we have not altered scriptPubKey processing even of a specially crafted P2PKH pubkeyhash and P2SH redeem script hash that is made to pass the der signature checks
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000'
rpc_result = self.nodes[0].decoderawtransaction(tx)
assert_equal('OP_DUP OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm'])
assert_equal('OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm'])
# some more full transaction tests of varying specific scriptSigs. used instead of
# tests in decodescript_script_sig because the decodescript RPC is specifically
# for working on scriptPubKeys (argh!).
push_signature = txSave.vin[0].scriptSig.hex()[2:(0x48*2+4)]
signature = push_signature[2:]
der_signature = signature[:-2]
signature_sighash_decoded = der_signature + '[ALL]'
signature_2 = der_signature + '82'
push_signature_2 = '48' + signature_2
signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]'
# 1) P2PK scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# make sure that the sighash decodes come out correctly for a more complex / lesser used case.
txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 2) multisig scriptSig
txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2)
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm'])
# 3) test a scriptSig that contains more than push operations.
# in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it.
txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101')
rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex())
assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm'])
def run_test(self):
self.decodescript_script_sig()
self.decodescript_script_pub_key()
self.decoderawtransaction_asm_sighashtype()
if __name__ == '__main__':
DecodeScriptTest().main()
|
mit
|
radjkarl/imgProcessor
|
imgProcessor/measure/SNR/SNR_IEC.py
|
1
|
1187
|
# coding=utf-8
from __future__ import division
import numpy as np
def SNR_IEC(i1, i2, ibg=0, allow_color_images=False):
'''
Calculate the averaged signal-to-noise ratio SNR50
as defined by IEC NP 60904-13
needs 2 reference EL images and one background image
'''
# ensure images are type float64 (double precision):
i1 = np.asfarray(i1)
i2 = np.asfarray(i2)
    if not np.isscalar(ibg) or ibg != 0:  # a background image (or non-zero offset) was given
ibg = np.asfarray(ibg)
assert i1.shape == ibg.shape, 'all input images need to have the same resolution'
assert i1.shape == i2.shape, 'all input images need to have the same resolution'
if not allow_color_images:
assert i1.ndim == 2, 'Images need to be in grayscale according to the IEC standard'
# SNR calculation as defined in 'IEC TS 60904-13':
signal = 0.5 * (i1 + i2) - ibg
noise = 0.5**0.5 * np.abs(i1 - i2) * ((2 / np.pi)**-0.5)
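    # The difference of two nominally identical frames has per-pixel std
    # sqrt(2)*sigma, and |N(0, s^2)| has mean s*sqrt(2/pi); the two scale
    # factors above therefore turn |i1 - i2| into an unbiased estimate of sigma.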
if signal.ndim == 3: # color
signal = np.average(signal, axis=2, weights=(0.114, 0.587, 0.299))
noise = np.average(noise, axis=2, weights=(0.114, 0.587, 0.299))
signal = signal.sum()
noise = noise.sum()
return signal / noise
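# Minimal self-check with synthetic data (illustrative only; the shapes, the
# noise level and the expected value of about 100 are assumptions, not part of
# the IEC standard):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    flat = np.full((256, 256), 1000.0)
    i1 = flat + rng.normal(0.0, 10.0, flat.shape)  # two EL frames, sigma = 10
    i2 = flat + rng.normal(0.0, 10.0, flat.shape)
    print(SNR_IEC(i1, i2))  # should print roughly 1000 / 10 = 100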
|
gpl-3.0
|
PIVX-Project/PIVX
|
test/functional/test_framework/bignum.py
|
6
|
1911
|
#!/usr/bin/env python3
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Big number routines.
This file is copied from python-bitcoinlib.
"""
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# pivx-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
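# Example round-trip (hand-computed): bn2vch(0x1234) == b'\x34\x12' (little
# endian, implicit size) and vch2bn(b'\x34\x12') == 0x1234; negative values
# set the high bit of the top byte rather than using two's complement.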
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
|
mit
|
kingmotley/SickRage
|
sickbeard/name_parser/parser.py
|
1
|
24227
|
# coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import re
import os.path
import sickbeard
from sickbeard.name_parser import regexes
from sickbeard import logger, helpers, scene_numbering, common, scene_exceptions, db
from sickrage.helper.common import remove_extension
from sickrage.helper.encoding import ek
from sickrage.helper.exceptions import ex
import dateutil.parser
class NameParser(object):
ALL_REGEX = 0
NORMAL_REGEX = 1
ANIME_REGEX = 2
def __init__(self, file_name=True, showObj=None, tryIndexers=False, # pylint: disable=too-many-arguments
naming_pattern=False, parse_method=None):
self.file_name = file_name
self.showObj = showObj
self.tryIndexers = tryIndexers
self.naming_pattern = naming_pattern
if (self.showObj and not self.showObj.is_anime) or parse_method == 'normal':
self._compile_regexes(self.NORMAL_REGEX)
elif (self.showObj and self.showObj.is_anime) or parse_method == 'anime':
self._compile_regexes(self.ANIME_REGEX)
else:
self._compile_regexes(self.ALL_REGEX)
@staticmethod
def clean_series_name(series_name):
"""Cleans up series name by removing any . and _
characters, along with any trailing hyphens.
Is basically equivalent to replacing all _ and . with a
space, but handles decimal numbers in string, for example:
>>> cleanRegexedSeriesName("an.example.1.0.test")
'an example 1.0 test'
>>> cleanRegexedSeriesName("an_example_1.0_test")
'an example 1.0 test'
Stolen from dbr's tvnamer
"""
series_name = re.sub(r"(\D)\.(?!\s)(\D)", "\\1 \\2", series_name)
series_name = re.sub(r"(\d)\.(\d{4})", "\\1 \\2", series_name) # if it ends in a year then don't keep the dot
series_name = re.sub(r"(\D)\.(?!\s)", "\\1 ", series_name)
series_name = re.sub(r"\.(?!\s)(\D)", " \\1", series_name)
series_name = series_name.replace("_", " ")
series_name = re.sub(r"-$", "", series_name)
series_name = re.sub(r"^\[.*\]", "", series_name)
return series_name.strip()
def _compile_regexes(self, regexMode):
if regexMode == self.ANIME_REGEX:
dbg_str = u"ANIME"
uncompiled_regex = [regexes.anime_regexes]
elif regexMode == self.NORMAL_REGEX:
dbg_str = u"NORMAL"
uncompiled_regex = [regexes.normal_regexes]
else:
dbg_str = u"ALL"
uncompiled_regex = [regexes.normal_regexes, regexes.anime_regexes]
self.compiled_regexes = []
for regexItem in uncompiled_regex:
for cur_pattern_num, (cur_pattern_name, cur_pattern) in enumerate(regexItem):
try:
cur_regex = re.compile(cur_pattern, re.VERBOSE | re.I)
except re.error, errormsg:
logger.log(u"WARNING: Invalid episode_pattern using {0} regexs, {1}. {2}".format(dbg_str, errormsg, cur_pattern))
else:
self.compiled_regexes.append((cur_pattern_num, cur_pattern_name, cur_regex))
def _parse_string(self, name): # pylint: disable=too-many-locals, too-many-branches, too-many-statements
if not name:
return
matches = []
bestResult = None
for (cur_regex_num, cur_regex_name, cur_regex) in self.compiled_regexes:
match = cur_regex.match(name)
if not match:
continue
result = ParseResult(name)
result.which_regex = [cur_regex_name]
result.score = 0 - cur_regex_num
named_groups = match.groupdict().keys()
if 'series_name' in named_groups:
result.series_name = match.group('series_name')
if result.series_name:
result.series_name = self.clean_series_name(result.series_name)
result.score += 1
if 'series_num' in named_groups and match.group('series_num'):
result.score += 1
if 'season_num' in named_groups:
tmp_season = int(match.group('season_num'))
if cur_regex_name == 'bare' and tmp_season in (19, 20):
continue
result.season_number = tmp_season
result.score += 1
if 'ep_num' in named_groups:
ep_num = self._convert_number(match.group('ep_num'))
if 'extra_ep_num' in named_groups and match.group('extra_ep_num'):
result.episode_numbers = range(ep_num, self._convert_number(match.group('extra_ep_num')) + 1)
result.score += 1
else:
result.episode_numbers = [ep_num]
result.score += 3
if 'ep_ab_num' in named_groups:
ep_ab_num = self._convert_number(match.group('ep_ab_num'))
if 'extra_ab_ep_num' in named_groups and match.group('extra_ab_ep_num'):
result.ab_episode_numbers = range(ep_ab_num,
self._convert_number(match.group('extra_ab_ep_num')) + 1)
result.score += 1
else:
result.ab_episode_numbers = [ep_ab_num]
result.score += 1
if 'air_date' in named_groups:
air_date = match.group('air_date')
try:
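                    # a title like '11.22.63' would otherwise parse as a date;
                    # the digit check below rejects that known false positive.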
assert re.sub(r'[^\d]*', '', air_date) != '112263'
result.air_date = dateutil.parser.parse(air_date, fuzzy_with_tokens=True)[0].date()
result.score += 1
except Exception:
continue
if 'extra_info' in named_groups:
tmp_extra_info = match.group('extra_info')
# Show.S04.Special or Show.S05.Part.2.Extras is almost certainly not every episode in the season
if tmp_extra_info and cur_regex_name == 'season_only' and re.search(
r'([. _-]|^)(special|extra)s?\w*([. _-]|$)', tmp_extra_info, re.I):
continue
result.extra_info = tmp_extra_info
result.score += 1
if 'release_group' in named_groups:
result.release_group = match.group('release_group')
result.score += 1
if 'version' in named_groups:
# assigns version to anime file if detected using anime regex. Non-anime regex receives -1
version = match.group('version')
if version:
result.version = version
else:
result.version = 1
else:
result.version = -1
matches.append(result)
if matches:
# pick best match with highest score based on placement
bestResult = max(sorted(matches, reverse=True, key=lambda x: x.which_regex), key=lambda x: x.score)
show = None
if not self.naming_pattern:
# try and create a show object for this result
show = helpers.get_show(bestResult.series_name, self.tryIndexers)
# confirm passed in show object indexer id matches result show object indexer id
if show:
if self.showObj and show.indexerid != self.showObj.indexerid:
show = None
bestResult.show = show
elif not show and self.showObj:
bestResult.show = self.showObj
# if this is a naming pattern test or result doesn't have a show object then return best result
if not bestResult.show or self.naming_pattern:
return bestResult
# get quality
bestResult.quality = common.Quality.nameQuality(name, bestResult.show.is_anime)
new_episode_numbers = []
new_season_numbers = []
new_absolute_numbers = []
# if we have an air-by-date show then get the real season/episode numbers
if bestResult.is_air_by_date:
airdate = bestResult.air_date.toordinal()
main_db_con = db.DBConnection()
sql_result = main_db_con.select(
"SELECT season, episode FROM tv_episodes WHERE showid = ? and indexer = ? and airdate = ?",
[bestResult.show.indexerid, bestResult.show.indexer, airdate])
season_number = None
episode_numbers = []
if sql_result:
season_number = int(sql_result[0][0])
episode_numbers = [int(sql_result[0][1])]
if season_number is None or not episode_numbers:
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(bestResult.show.indexer).api_params.copy()
lINDEXER_API_PARMS['language'] = bestResult.show.lang or sickbeard.INDEXER_DEFAULT_LANGUAGE
t = sickbeard.indexerApi(bestResult.show.indexer).indexer(**lINDEXER_API_PARMS)
epObj = t[bestResult.show.indexerid].airedOn(bestResult.air_date)[0]
season_number = int(epObj["seasonnumber"])
episode_numbers = [int(epObj["episodenumber"])]
except sickbeard.indexer_episodenotfound:
logger.log(u"Unable to find episode with date " + str(bestResult.air_date) + " for show " + bestResult.show.name + ", skipping", logger.WARNING)
episode_numbers = []
except sickbeard.indexer_error as e:
logger.log(u"Unable to contact " + sickbeard.indexerApi(bestResult.show.indexer).name + ": " + ex(e), logger.WARNING)
episode_numbers = []
for epNo in episode_numbers:
s = season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
season_number,
epNo)
new_episode_numbers.append(e)
new_season_numbers.append(s)
elif bestResult.show.is_anime and bestResult.ab_episode_numbers:
scene_season = scene_exceptions.get_scene_exception_by_name(bestResult.series_name)[1]
for epAbsNo in bestResult.ab_episode_numbers:
a = epAbsNo
if bestResult.show.is_scene:
a = scene_numbering.get_indexer_absolute_numbering(bestResult.show.indexerid,
bestResult.show.indexer, epAbsNo,
True, scene_season)
(s, e) = helpers.get_all_episodes_from_absolute_number(bestResult.show, [a])
new_absolute_numbers.append(a)
new_episode_numbers.extend(e)
new_season_numbers.append(s)
elif bestResult.season_number and bestResult.episode_numbers:
for epNo in bestResult.episode_numbers:
s = bestResult.season_number
e = epNo
if bestResult.show.is_scene:
(s, e) = scene_numbering.get_indexer_numbering(bestResult.show.indexerid,
bestResult.show.indexer,
bestResult.season_number,
epNo)
if bestResult.show.is_anime:
a = helpers.get_absolute_number_from_season_and_episode(bestResult.show, s, e)
if a:
new_absolute_numbers.append(a)
new_episode_numbers.append(e)
new_season_numbers.append(s)
        # need to do a quick sanity check here. It's possible that we now have episodes
# from more than one season (by tvdb numbering), and this is just too much
# for sickbeard, so we'd need to flag it.
new_season_numbers = list(set(new_season_numbers)) # remove duplicates
if len(new_season_numbers) > 1:
raise InvalidNameException("Scene numbering results episodes from "
"seasons %s, (i.e. more than one) and "
"sickrage does not support this. "
"Sorry." % (str(new_season_numbers)))
# I guess it's possible that we'd have duplicate episodes too, so lets
# eliminate them
new_episode_numbers = list(set(new_episode_numbers))
new_episode_numbers.sort()
# maybe even duplicate absolute numbers so why not do them as well
new_absolute_numbers = list(set(new_absolute_numbers))
new_absolute_numbers.sort()
if new_absolute_numbers:
bestResult.ab_episode_numbers = new_absolute_numbers
if new_season_numbers and new_episode_numbers:
bestResult.episode_numbers = new_episode_numbers
bestResult.season_number = new_season_numbers[0]
if bestResult.show.is_scene:
logger.log(
u"Converted parsed result " + bestResult.original_name + " into " + str(bestResult).decode('utf-8',
'xmlcharrefreplace'),
logger.DEBUG)
# CPU sleep
time.sleep(0.02)
return bestResult
@staticmethod
def _combine_results(first, second, attr):
# if the first doesn't exist then return the second or nothing
if not first:
if not second:
return None
else:
return getattr(second, attr)
# if the second doesn't exist then return the first
if not second:
return getattr(first, attr)
a = getattr(first, attr)
b = getattr(second, attr)
# if a is good use it
if a is not None or (isinstance(a, list) and a):
return a
# if not use b (if b isn't set it'll just be default)
else:
return b
@staticmethod
def _unicodify(obj, encoding="utf-8"):
if isinstance(obj, bytes):
obj = unicode(obj, encoding, 'replace')
return obj
@staticmethod
def _convert_number(org_number):
"""
Convert org_number into an integer
org_number: integer or representation of a number: string or unicode
Try force converting to int first, on error try converting from Roman numerals
returns integer or 0
"""
try:
# try forcing to int
if org_number:
number = int(org_number)
else:
number = 0
except Exception:
# on error try converting from Roman numerals
roman_to_int_map = (
('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
('XC', 90), ('L', 50), ('XL', 40), ('X', 10),
('IX', 9), ('V', 5), ('IV', 4), ('I', 1)
)
roman_numeral = str(org_number).upper()
number = 0
index = 0
for numeral, integer in roman_to_int_map:
while roman_numeral[index:index + len(numeral)] == numeral:
number += integer
index += len(numeral)
return number
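    # Examples for _convert_number (hand-checked against the table above):
    #   "5" -> 5, "IX" -> 9 (Roman-numeral fallback), None -> 0.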
def parse(self, name, cache_result=True):
name = self._unicodify(name)
if self.naming_pattern:
cache_result = False
cached = name_parser_cache.get(name)
if cached:
return cached
# break it into parts if there are any (dirname, file name, extension)
dir_name, file_name = ek(os.path.split, name)
if self.file_name:
base_file_name = remove_extension(file_name)
else:
base_file_name = file_name
# set up a result to use
final_result = ParseResult(name)
# try parsing the file name
file_name_result = self._parse_string(base_file_name)
# use only the direct parent dir
dir_name = ek(os.path.basename, dir_name)
# parse the dirname for extra info if needed
dir_name_result = self._parse_string(dir_name)
# build the ParseResult object
final_result.air_date = self._combine_results(file_name_result, dir_name_result, 'air_date')
# anime absolute numbers
final_result.ab_episode_numbers = self._combine_results(file_name_result, dir_name_result, 'ab_episode_numbers')
# season and episode numbers
final_result.season_number = self._combine_results(file_name_result, dir_name_result, 'season_number')
final_result.episode_numbers = self._combine_results(file_name_result, dir_name_result, 'episode_numbers')
# if the dirname has a release group/show name I believe it over the filename
final_result.series_name = self._combine_results(dir_name_result, file_name_result, 'series_name')
final_result.extra_info = self._combine_results(dir_name_result, file_name_result, 'extra_info')
final_result.release_group = self._combine_results(dir_name_result, file_name_result, 'release_group')
final_result.version = self._combine_results(dir_name_result, file_name_result, 'version')
final_result.which_regex = []
if final_result == file_name_result:
final_result.which_regex = file_name_result.which_regex
elif final_result == dir_name_result:
final_result.which_regex = dir_name_result.which_regex
else:
if file_name_result:
final_result.which_regex += file_name_result.which_regex
if dir_name_result:
final_result.which_regex += dir_name_result.which_regex
final_result.show = self._combine_results(file_name_result, dir_name_result, 'show')
final_result.quality = self._combine_results(file_name_result, dir_name_result, 'quality')
if not final_result.show:
raise InvalidShowException("Unable to match {0} to a show in your database".format
(name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace')))
# if there's no useful info in it then raise an exception
if final_result.season_number is None and not final_result.episode_numbers and final_result.air_date is None and not final_result.ab_episode_numbers and not final_result.series_name:
raise InvalidNameException("Unable to parse {0} to a valid episode".format
(name.encode(sickbeard.SYS_ENCODING, 'xmlcharrefreplace')))
if cache_result:
name_parser_cache.add(name, final_result)
logger.log(u"Parsed " + name + " into " + str(final_result).decode('utf-8', 'xmlcharrefreplace'), logger.DEBUG)
return final_result
class ParseResult(object): # pylint: disable=too-many-instance-attributes
def __init__(self, original_name, series_name=None, season_number=None, # pylint: disable=too-many-arguments
episode_numbers=None, extra_info=None, release_group=None,
air_date=None, ab_episode_numbers=None, show=None,
score=None, quality=None, version=None):
self.original_name = original_name
self.series_name = series_name
self.season_number = season_number
if not episode_numbers:
self.episode_numbers = []
else:
self.episode_numbers = episode_numbers
if not ab_episode_numbers:
self.ab_episode_numbers = []
else:
self.ab_episode_numbers = ab_episode_numbers
if not quality:
self.quality = common.Quality.UNKNOWN
else:
self.quality = quality
self.extra_info = extra_info
self.release_group = release_group
self.air_date = air_date
self.which_regex = []
self.show = show
self.score = score
self.version = version
    def __eq__(self, other):
        if not other:
            return False
        return all([
self.series_name == other.series_name,
self.season_number == other.season_number,
self.episode_numbers == other.episode_numbers,
self.extra_info == other.extra_info,
self.release_group == other.release_group,
self.air_date == other.air_date,
self.ab_episode_numbers == other.ab_episode_numbers,
self.show == other.show,
self.score == other.score,
self.quality == other.quality,
self.version == other.version
])
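    # NB: original_name and which_regex are deliberately not compared, so a
    # combined result tests equal to whichever source result it was built from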
def __str__(self):
if self.series_name is not None:
to_return = self.series_name + u' - '
else:
to_return = u''
if self.season_number is not None:
to_return += 'S' + str(self.season_number).zfill(2)
if self.episode_numbers:
for e in self.episode_numbers:
to_return += 'E' + str(e).zfill(2)
if self.is_air_by_date:
to_return += str(self.air_date)
if self.ab_episode_numbers:
to_return += ' [ABS: ' + str(self.ab_episode_numbers) + ']'
if self.version and self.is_anime is True:
to_return += ' [ANIME VER: ' + str(self.version) + ']'
if self.release_group:
to_return += ' [GROUP: ' + self.release_group + ']'
to_return += ' [ABD: ' + str(self.is_air_by_date) + ']'
to_return += ' [ANIME: ' + str(self.is_anime) + ']'
to_return += ' [whichReg: ' + str(self.which_regex) + ']'
return to_return.encode('utf-8')
@property
def is_air_by_date(self):
if self.air_date:
return True
return False
@property
def is_anime(self):
if self.ab_episode_numbers:
return True
return False
class NameParserCache(object):
_previous_parsed = {}
_cache_size = 100
def add(self, name, parse_result):
self._previous_parsed[name] = parse_result
while len(self._previous_parsed) > self._cache_size:
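            # plain dicts are unordered in Python 2, so this evicts an
            # arbitrary entry rather than the oldest one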
del self._previous_parsed[self._previous_parsed.keys()[0]]
def get(self, name):
if name in self._previous_parsed:
logger.log(u"Using cached parse result for: " + name, logger.DEBUG)
return self._previous_parsed[name]
name_parser_cache = NameParserCache()
class InvalidNameException(Exception):
"""The given release name is not valid"""
class InvalidShowException(Exception):
"""The given show name is not valid"""
|
gpl-3.0
|
c0710204/edx-platform
|
common/djangoapps/util/tests/test_submit_feedback.py
|
40
|
16550
|
"""Tests for the Zendesk"""
from django.contrib.auth.models import AnonymousUser
from django.http import Http404
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from student.tests.factories import UserFactory
from util import views
from zendesk import ZendeskError
import json
import mock
from student.tests.test_microsite import fake_microsite_get_value
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_FEEDBACK_SUBMISSION": True})
@override_settings(ZENDESK_URL="dummy", ZENDESK_USER="dummy", ZENDESK_API_KEY="dummy")
@mock.patch("util.views.dog_stats_api")
@mock.patch("util.views._ZendeskApi", autospec=True)
class SubmitFeedbackTest(TestCase):
def setUp(self):
"""Set up data for the test case"""
self._request_factory = RequestFactory()
self._anon_user = AnonymousUser()
self._auth_user = UserFactory.create(
email="[email protected]",
username="test",
profile__name="Test User"
)
# This contains issue_type and course_id to ensure that tags are submitted correctly
self._anon_fields = {
"email": "[email protected]",
"name": "Test User",
"subject": "a subject",
"details": "some details",
"issue_type": "test_issue",
"course_id": "test_course"
}
# This does not contain issue_type nor course_id to ensure that they are optional
self._auth_fields = {"subject": "a subject", "details": "some details"}
def _build_and_run_request(self, user, fields):
"""
Generate a request and invoke the view, returning the response.
The request will be a POST request from the given `user`, with the given
`fields` in the POST body.
"""
req = self._request_factory.post(
"/submit_feedback",
data=fields,
HTTP_REFERER="test_referer",
HTTP_USER_AGENT="test_user_agent",
REMOTE_ADDR="1.2.3.4",
SERVER_NAME="test_server",
)
req.user = user
return views.submit_feedback(req)
def _assert_bad_request(self, response, field, zendesk_mock_class, datadog_mock):
"""
Assert that the given `response` contains correct failure data.
It should have a 400 status code, and its content should be a JSON
object containing the specified `field` and an `error`.
"""
self.assertEqual(response.status_code, 400)
resp_json = json.loads(response.content)
self.assertTrue("field" in resp_json)
self.assertEqual(resp_json["field"], field)
self.assertTrue("error" in resp_json)
# There should be absolutely no interaction with Zendesk
self.assertFalse(zendesk_mock_class.return_value.mock_calls)
self.assertFalse(datadog_mock.mock_calls)
def _test_bad_request_omit_field(self, user, fields, omit_field, zendesk_mock_class, datadog_mock):
"""
Invoke the view with a request missing a field and assert correctness.
The request will be a POST request from the given `user`, with POST
fields taken from `fields` minus the entry specified by `omit_field`.
The response should have a 400 (bad request) status code and specify
the invalid field and an error message, and the Zendesk API should not
have been invoked.
"""
filtered_fields = {k: v for (k, v) in fields.items() if k != omit_field}
resp = self._build_and_run_request(user, filtered_fields)
self._assert_bad_request(resp, omit_field, zendesk_mock_class, datadog_mock)
def _test_bad_request_empty_field(self, user, fields, empty_field, zendesk_mock_class, datadog_mock):
"""
Invoke the view with an empty field and assert correctness.
The request will be a POST request from the given `user`, with POST
fields taken from `fields`, replacing the entry specified by
`empty_field` with the empty string. The response should have a 400
(bad request) status code and specify the invalid field and an error
message, and the Zendesk API should not have been invoked.
"""
altered_fields = fields.copy()
altered_fields[empty_field] = ""
resp = self._build_and_run_request(user, altered_fields)
self._assert_bad_request(resp, empty_field, zendesk_mock_class, datadog_mock)
def _test_success(self, user, fields):
"""
Generate a request, invoke the view, and assert success.
The request will be a POST request from the given `user`, with the given
`fields` in the POST body. The response should have a 200 (success)
status code.
"""
resp = self._build_and_run_request(user, fields)
self.assertEqual(resp.status_code, 200)
def _assert_datadog_called(self, datadog_mock, with_tags):
expected_datadog_calls = [
mock.call.increment(
views.DATADOG_FEEDBACK_METRIC,
tags=(["course_id:test_course", "issue_type:test_issue"] if with_tags else [])
)
]
self.assertEqual(datadog_mock.mock_calls, expected_datadog_calls)
def test_bad_request_anon_user_no_name(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `name`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "name", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_no_email(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `email`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "email", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_invalid_email(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user specifying an invalid `email`."""
fields = self._anon_fields.copy()
fields["email"] = "This is not a valid email address!"
resp = self._build_and_run_request(self._anon_user, fields)
self._assert_bad_request(resp, "email", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_no_subject(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `subject`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "subject", zendesk_mock_class, datadog_mock)
def test_bad_request_anon_user_no_details(self, zendesk_mock_class, datadog_mock):
"""Test a request from an anonymous user not specifying `details`."""
self._test_bad_request_omit_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._anon_user, self._anon_fields, "details", zendesk_mock_class, datadog_mock)
def test_valid_request_anon_user(self, zendesk_mock_class, datadog_mock):
"""
Test a valid request from an anonymous user.
The response should have a 200 (success) status code, and a ticket with
the given information should have been submitted via the Zendesk API.
"""
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.return_value = 42
self._test_success(self._anon_user, self._anon_fields)
expected_zendesk_calls = [
mock.call.create_ticket(
{
"ticket": {
"requester": {"name": "Test User", "email": "[email protected]"},
"subject": "a subject",
"comment": {"body": "some details"},
"tags": ["test_course", "test_issue", "LMS"]
}
}
),
mock.call.update_ticket(
42,
{
"ticket": {
"comment": {
"public": False,
"body":
"Additional information:\n\n"
"Client IP: 1.2.3.4\n"
"Host: test_server\n"
"Page: test_referer\n"
"Browser: test_user_agent"
}
}
}
)
]
self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls)
self._assert_datadog_called(datadog_mock, with_tags=True)
@mock.patch("microsite_configuration.microsite.get_value", fake_microsite_get_value)
def test_valid_request_anon_user_microsite(self, zendesk_mock_class, datadog_mock):
"""
Test a valid request from an anonymous user to a mocked out microsite
The response should have a 200 (success) status code, and a ticket with
the given information should have been submitted via the Zendesk API with the additional
tag that will come from microsite configuration
"""
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.return_value = 42
self._test_success(self._anon_user, self._anon_fields)
expected_zendesk_calls = [
mock.call.create_ticket(
{
"ticket": {
"requester": {"name": "Test User", "email": "[email protected]"},
"subject": "a subject",
"comment": {"body": "some details"},
"tags": ["test_course", "test_issue", "LMS", "whitelabel_fakeorg"]
}
}
),
mock.call.update_ticket(
42,
{
"ticket": {
"comment": {
"public": False,
"body":
"Additional information:\n\n"
"Client IP: 1.2.3.4\n"
"Host: test_server\n"
"Page: test_referer\n"
"Browser: test_user_agent"
}
}
}
)
]
self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls)
self._assert_datadog_called(datadog_mock, with_tags=True)
def test_bad_request_auth_user_no_subject(self, zendesk_mock_class, datadog_mock):
"""Test a request from an authenticated user not specifying `subject`."""
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "subject", zendesk_mock_class, datadog_mock)
def test_bad_request_auth_user_no_details(self, zendesk_mock_class, datadog_mock):
"""Test a request from an authenticated user not specifying `details`."""
self._test_bad_request_omit_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class, datadog_mock)
self._test_bad_request_empty_field(self._auth_user, self._auth_fields, "details", zendesk_mock_class, datadog_mock)
def test_valid_request_auth_user(self, zendesk_mock_class, datadog_mock):
"""
Test a valid request from an authenticated user.
The response should have a 200 (success) status code, and a ticket with
the given information should have been submitted via the Zendesk API.
"""
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.return_value = 42
self._test_success(self._auth_user, self._auth_fields)
expected_zendesk_calls = [
mock.call.create_ticket(
{
"ticket": {
"requester": {"name": "Test User", "email": "[email protected]"},
"subject": "a subject",
"comment": {"body": "some details"},
"tags": ["LMS"]
}
}
),
mock.call.update_ticket(
42,
{
"ticket": {
"comment": {
"public": False,
"body":
"Additional information:\n\n"
"username: test\n"
"Client IP: 1.2.3.4\n"
"Host: test_server\n"
"Page: test_referer\n"
"Browser: test_user_agent"
}
}
}
)
]
self.assertEqual(zendesk_mock_instance.mock_calls, expected_zendesk_calls)
self._assert_datadog_called(datadog_mock, with_tags=False)
def test_get_request(self, zendesk_mock_class, datadog_mock):
"""Test that a GET results in a 405 even with all required fields"""
req = self._request_factory.get("/submit_feedback", data=self._anon_fields)
req.user = self._anon_user
resp = views.submit_feedback(req)
self.assertEqual(resp.status_code, 405)
self.assertIn("Allow", resp)
self.assertEqual(resp["Allow"], "POST")
# There should be absolutely no interaction with Zendesk
self.assertFalse(zendesk_mock_class.mock_calls)
self.assertFalse(datadog_mock.mock_calls)
def test_zendesk_error_on_create(self, zendesk_mock_class, datadog_mock):
"""
Test Zendesk returning an error on ticket creation.
We should return a 500 error with no body
"""
err = ZendeskError(msg="", error_code=404)
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.create_ticket.side_effect = err
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(resp.status_code, 500)
self.assertFalse(resp.content)
self._assert_datadog_called(datadog_mock, with_tags=True)
def test_zendesk_error_on_update(self, zendesk_mock_class, datadog_mock):
"""
Test for Zendesk returning an error on ticket update.
If Zendesk returns any error on ticket update, we return a 200 to the
browser because the update contains additional information that is not
necessary for the user to have submitted their feedback.
"""
err = ZendeskError(msg="", error_code=500)
zendesk_mock_instance = zendesk_mock_class.return_value
zendesk_mock_instance.update_ticket.side_effect = err
resp = self._build_and_run_request(self._anon_user, self._anon_fields)
self.assertEqual(resp.status_code, 200)
self._assert_datadog_called(datadog_mock, with_tags=True)
@mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_FEEDBACK_SUBMISSION": False})
def test_not_enabled(self, zendesk_mock_class, datadog_mock):
"""
Test for Zendesk submission not enabled in `settings`.
We should raise Http404.
"""
with self.assertRaises(Http404):
self._build_and_run_request(self._anon_user, self._anon_fields)
def test_zendesk_not_configured(self, zendesk_mock_class, datadog_mock):
"""
Test for Zendesk not fully configured in `settings`.
For each required configuration parameter, test that setting it to
`None` causes an otherwise valid request to return a 500 error.
"""
def test_case(missing_config):
with mock.patch(missing_config, None):
with self.assertRaises(Exception):
self._build_and_run_request(self._anon_user, self._anon_fields)
test_case("django.conf.settings.ZENDESK_URL")
test_case("django.conf.settings.ZENDESK_USER")
test_case("django.conf.settings.ZENDESK_API_KEY")
|
agpl-3.0
|
hallovveen31/smooth
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
	except ValueError:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
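# autodict (imported from the Perf-Trace-Util helpers above) creates nested
# dicts on first access, so the four-level index in raw_syscalls__sys_exit
# works without pre-populating; the TypeError branch seeds each leaf counter.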
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
nikitabiradar/student_registration
|
janastu/lib/python2.7/encodings/cp875.py
|
593
|
13110
|
""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp875',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\u0391' # 0x41 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x53 -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x54 -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x55 -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'|' # 0x6A -> VERTICAL LINE
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xa8' # 0x70 -> DIAERESIS
u'\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\xa0' # 0x74 -> NO-BREAK SPACE
u'\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\u0385' # 0x80 -> GREEK DIALYTIKA TONOS
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x8B -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\u03b7' # 0x9A -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x9B -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x9F -> GREEK SMALL LETTER MU
u'\xb4' # 0xA0 -> ACUTE ACCENT
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\u03bd' # 0xAA -> GREEK SMALL LETTER NU
u'\u03be' # 0xAB -> GREEK SMALL LETTER XI
u'\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xAD -> GREEK SMALL LETTER PI
u'\u03c1' # 0xAE -> GREEK SMALL LETTER RHO
u'\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA
u'\xa3' # 0xB0 -> POUND SIGN
u'\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0xBB -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xBD -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xBE -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xBF -> GREEK SMALL LETTER PSI
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA
u'\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u03b0' # 0xCD -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK
u'\u2015' # 0xCF -> HORIZONTAL BAR
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb1' # 0xDA -> PLUS-MINUS SIGN
u'\xbd' # 0xDB -> VULGAR FRACTION ONE HALF
u'\x1a' # 0xDC -> SUBSTITUTE
u'\u0387' # 0xDD -> GREEK ANO TELEIA
u'\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK
u'\xa6' # 0xDF -> BROKEN BAR
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\x1a' # 0xE1 -> SUBSTITUTE
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xa7' # 0xEB -> SECTION SIGN
u'\x1a' # 0xEC -> SUBSTITUTE
u'\x1a' # 0xED -> SUBSTITUTE
u'\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xEF -> NOT SIGN
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xa9' # 0xFB -> COPYRIGHT SIGN
u'\x1a' # 0xFC -> SUBSTITUTE
u'\x1a' # 0xFD -> SUBSTITUTE
u'\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
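# A quick round-trip sanity check (illustrative; assumes this module is
# registered under the codec name 'cp875'):
#   assert u'\u0391'.encode('cp875') == '\x41'  # GREEK CAPITAL LETTER ALPHA
#   assert '\x41'.decode('cp875') == u'\u0391'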
|
mit
|
y-higuchi/odenos
|
src/test/python/org/o3project/odenos/core/component/network/packet/test_ofp_in_packet.py
|
6
|
5404
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.packet.ofp_in_packet\
import OFPInPacket
import unittest
class InPacketTest(unittest.TestCase):
value = None
result = None
def setUp(self):
self.target = OFPInPacket("ofp_in_packet_id",
"OFPInPacket",
"ofp_in_packet_attributes",
"ofp_in_packet_node",
"ofp_in_packet_port",
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"},
"ofp_in_packet_data")
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target._body[self.target.PACKET_ID],
"ofp_in_packet_id")
self.assertEqual(self.target._body[self.target.TYPE],
"OFPInPacket")
self.assertEqual(self.target._body[self.target.ATTRIBUTES],
"ofp_in_packet_attributes")
self.assertEqual(self.target._body[self.target.NODE],
"ofp_in_packet_node")
self.assertEqual(self.target._body[self.target.PORT],
"ofp_in_packet_port")
self.assertEqual(self.target._body[self.target.HEADER],
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"})
self.assertEqual(self.target._body[self.target.DATA],
"ofp_in_packet_data")
def test_create_from_packed(self):
self.value = {"packet_id": "0123",
"type": "OFPInPacket",
"attributes": "0789",
"node": "9870",
"port": "6540",
"header": {"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"},
"data": "0147"}
self.result = OFPInPacket.create_from_packed(self.value)
self.assertEqual(self.result._body[self.target.PACKET_ID],
"0123")
self.assertEqual(self.result._body[self.target.TYPE],
"OFPInPacket")
self.assertEqual(self.result._body[self.target.ATTRIBUTES],
"0789")
self.assertEqual(self.result._body[self.target.NODE],
"9870")
self.assertEqual(self.result._body[self.target.PORT],
"6540")
self.assertEqual(self.result._body[self.target.HEADER]._body,
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"})
self.assertEqual(self.result._body[self.target.DATA],
"0147")
def test_packed_object(self):
self.value = {"packet_id": "0123",
"type": "OFPInPacket",
"attributes": "0789",
"node": "9870",
"port": "6540",
"header": {"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"},
"data": "0147"}
self.create = OFPInPacket.create_from_packed(self.value)
self.result = self.create.packed_object()
self.assertEqual(self.result[self.target.PACKET_ID],
"0123")
self.assertEqual(self.result[self.target.TYPE],
"OFPInPacket")
self.assertEqual(self.result[self.target.ATTRIBUTES],
"0789")
self.assertEqual(self.result[self.target.NODE],
"9870")
self.assertEqual(self.result[self.target.PORT],
"6540")
self.assertEqual(self.result[self.target.HEADER],
{"type": "OFPFlowMatch",
"in_port": "123456",
"in_node": "123456789"})
self.assertEqual(self.result[self.target.DATA],
"0147")
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
madmax983/h2o-3
|
py2/testdir_single_jvm/test_parse_many_cases.py
|
20
|
9104
|
import unittest, re, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_args
from h2o_test import verboseprint
DO_RF = False
DO_SUMMARY = False
DO_INTERMEDIATE_RESULTS = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_A_many_parse1(self):
rows = self.genrows1()
set = 1
self.tryThemAll(set, rows, enumsOnly=False)
def test_B_many_parse2(self):
rows = self.genrows2()
set = 2
self.tryThemAll(set, rows, enumsOnly=True)
# this one has problems with blank lines
def test_C_many_parse3(self):
rows = self.genrows3()
set = 3
self.tryThemAll(set, rows, enumsOnly=True)
def genrows1(self):
# comment has to have # in first column? (no leading whitespace)
# FIX! what about blank fields and spaces as sep
# FIX! temporary need more lines to avoid sample error in H2O
# throw in some variants for leading 0 on the decimal, and scientific notation
# new: change the @ to an alternate legal SEP if the special HIVE SEP is in play
rows = [
# get rid of comments. We don't really support?
# "# 'comment, is okay",
# '# "this comment, is okay too',
# "# 'this' comment, is okay too",
# don't test comma's in the header. get rid of all secondary separator-like char here
# "@FirstName@|@Middle@Initials@|@LastName@|@Date@of@Birth@", # had to remove the trailing space to avoid bad parse
"FirstName|MiddleInitials|LastName|DateofBirth", # had to remove the trailing space to avoid bad parse
"0|0.5|1|0",
"3|NaN|4|1",
"6||8|0",
"0.6|0.7|0.8|1",
"+0.6|+0.7|+0.8|0",
"-0.6|-0.7|-0.8|1",
".6|.7|.8|0",
"+.6|+.7|+.8|1",
"-.6|-.7|-.8|0",
"+0.6e0|+0.7e0|+0.8e0|1",
"-0.6e0|-0.7e0|-0.8e0|0",
".6e0|.7e0|.8e0|1",
"+.6e0|+.7e0|+.8e0|0",
"-.6e0|-.7e0|-.8e0|1",
"+0.6e00|+0.7e00|+0.8e00|0",
"-0.6e00|-0.7e00|-0.8e00|1",
".6e00|.7e00|.8e00|0",
"+.6e00|+.7e00|+.8e00|1",
"-.6e00|-.7e00|-.8e00|0",
"+0.6e-01|+0.7e-01|+0.8e-01|1",
"-0.6e-01|-0.7e-01|-0.8e-01|0",
".6e-01|.7e-01|.8e-01|1",
"+.6e-01|+.7e-01|+.8e-01|0",
"-.6e-01|-.7e-01|-.8e-01|1",
"+0.6e+01|+0.7e+01|+0.8e+01|0",
"-0.6e+01|-0.7e+01|-0.8e+01|1",
".6e+01|.7e+01|.8e+01|0",
"+.6e+01|+.7e+01|+.8e+01|1",
"-.6e+01|-.7e+01|-.8e+01|0",
"+0.6e102|+0.7e102|+0.8e102|1",
"-0.6e102|-0.7e102|-0.8e102|0",
".6e102|.7e102|.8e102|1",
"+.6e102|+.7e102|+.8e102|0",
"-.6e102|-.7e102|-.8e102|1",
]
return rows
# "# comment here is okay",
# "# comment here is okay too",
# FIX! needed an extra line to avoid bug on default 67+ sample?
def genrows2(self):
rows = [
"First@Name|@MiddleInitials|LastName@|Date@ofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"Gwendolyn|B.|Burton|1947-10-26",
"Elodia|G.|Ali|1983-10-31",
"Elo@dia|@G.|Ali@|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31"
]
return rows
# update spec
# intermixing blank lines in the first two lines breaks things
# blank lines cause all columns except the first to get NA (red)
# first may get a blank string? (not ignored)
def genrows3(self):
rows = [
"# comment here is okay",
"# comment here is okay too",
"FirstName|MiddleInitials|LastName|DateofBirth",
"Kalyn|A.|Dalton|1967-04-01",
"",
"Gwendolyn||Burton|1947-10-26",
"",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
"Elodia|G.|Ali|1983-10-31",
]
return rows
# The 3 supported line-ends
# FIX! should test them within quoted tokens
eolDict = {
0:"\n",
1:"\r\n",
2:"\r"
}
# tab here will cause problems too?
# 5:['"\t','\t"'],
# 8:["'\t","\t'"]
tokenChangeDict = {
0:['',''],
        1:['\t','\t'],
2:[' ',' '],
3:['"','"'],
4:['" ',' "'],
5:["'","'"],
6:["' "," '"],
}
# flip in more characters to confuse the separator decisions. for enum test data only
tokenChangeDictEnumsOnly = {
0:[' a\t','\ta '],
1:['\t a','a \t'],
2:['',''],
3:['\t','\t'],
4:[' ',' '],
5:['"','"'],
6:['" ',' "'],
7:["'","'"],
8:["' "," '"],
}
def changeTokens(self, rows, tokenCase, tokenChangeDict):
[cOpen,cClose] = tokenChangeDict[tokenCase]
newRows = []
for r in rows:
# don't quote lines that start with #
# can quote lines start with some spaces or tabs? maybe
comment = re.match(r'^[ \t]*#', r)
empty = re.match(r'^$',r)
if not (comment or empty):
r = re.sub('^',cOpen,r)
r = re.sub('\|',cClose + '|' + cOpen,r)
r = re.sub('$',cClose,r)
verboseprint(r)
newRows.append(r)
return newRows
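    # e.g. with cOpen/cClose of '"', the row 'a|b' becomes '"a"|"b"'; comment
    # and blank lines pass through untouched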
    def writeRows(self,csvPathname,rows,eol):
        # use a context manager so the file is flushed and closed before parsing
        with open(csvPathname, 'w') as f:
            for r in rows:
                f.write(r + eol)
    # what about case of missing eol at end of file?
sepChangeDict = {
# NEW: 0x01 can be SEP character for Hive datasets
0:"",
1:",",
2:" ",
3:"\t",
}
def changeSep(self,rows,sepCase):
# do a trial replace, to see if we get a <tab><sp> problem
# comments at the beginning..get a good row
r = rows[-1]
        tabseptab = re.search(r'\t\|\t', r)
        spsepsp = re.search(r' \| ', r)
if tabseptab or spsepsp:
# use comma instead. always works
# print "Avoided"
newSep = ","
else:
newSep = self.sepChangeDict[sepCase]
newRows = [r.replace('|',newSep) for r in rows]
# special case, if using the HIVE sep, substitute randomly
# one of the other SEPs into the "@" in the template
# FIX! we need to add HIVE lineends into lineend choices.
# assuming that lineend
if newSep == "":
# don't use the same SEP to swap in.
randomOtherSep = random.choice(self.sepChangeDict.values())
while (randomOtherSep==newSep):
randomOtherSep = random.choice(self.sepChangeDict.values())
newRows = [r.replace('@',randomOtherSep) for r in newRows]
return newRows
def tryThemAll(self, set, rows, enumsOnly=False):
for eolCase in range(len(self.eolDict)):
eol = self.eolDict[eolCase]
# change tokens must be first
            if enumsOnly:
                tcd = self.tokenChangeDictEnumsOnly
            else:
                tcd = self.tokenChangeDict
for tokenCase in range(len(tcd)):
newRows1 = self.changeTokens(rows, tokenCase, tcd)
for sepCase in range(len(self.sepChangeDict)):
newRows2 = self.changeSep(newRows1,sepCase)
csvPathname = SYNDATASETS_DIR + '/parsetmp_' + \
str(set) + "_" + \
str(eolCase) + "_" + \
str(tokenCase) + "_" + \
str(sepCase) + \
'.data'
self.writeRows(csvPathname,newRows2,eol)
if "'" in tcd[tokenCase][0]:
single_quotes = 1
else:
single_quotes = 0
parseResult = h2i.import_parse(path=csvPathname, schema='local', single_quotes=single_quotes,
noPrint=not h2o_args.verbose, retryDelaySecs=0.1,
doSummary=DO_SUMMARY, intermediateResults=DO_INTERMEDIATE_RESULTS)
if DO_RF:
h2o_cmd.runRF(parseResult=parseResult, trees=1,
timeoutSecs=10, retryDelaySecs=0.1, noPrint=True, print_params=True)
verboseprint("Set", set)
h2o.check_sandbox_for_errors()
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()
|
apache-2.0
|
dya2/python-for-android
|
python-modules/twisted/twisted/protocols/memcache.py
|
60
|
23339
|
# -*- test-case-name: twisted.test.test_memcache -*-
# Copyright (c) 2007-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Memcache client protocol. Memcached is a caching server, storing data in the
form of key/value pairs, and memcache is the protocol used to talk to it.
To connect to a server, create a factory for L{MemCacheProtocol}::
from twisted.internet import reactor, protocol
from twisted.protocols.memcache import MemCacheProtocol, DEFAULT_PORT
d = protocol.ClientCreator(reactor, MemCacheProtocol
).connectTCP("localhost", DEFAULT_PORT)
def doSomething(proto):
# Here you call the memcache operations
return proto.set("mykey", "a lot of data")
d.addCallback(doSomething)
reactor.run()
All the operations of the memcache protocol are present, but
L{MemCacheProtocol.set} and L{MemCacheProtocol.get} are the more important.
See U{http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt} for
more information about the protocol.
"""
try:
from collections import deque
except ImportError:
class deque(list):
def popleft(self):
return self.pop(0)
from twisted.protocols.basic import LineReceiver
from twisted.protocols.policies import TimeoutMixin
from twisted.internet.defer import Deferred, fail, TimeoutError
from twisted.python import log
DEFAULT_PORT = 11211
class NoSuchCommand(Exception):
"""
Exception raised when a non existent command is called.
"""
class ClientError(Exception):
"""
Error caused by an invalid client call.
"""
class ServerError(Exception):
"""
Problem happening on the server.
"""
class Command(object):
"""
Wrap a client action into an object, that holds the values used in the
protocol.
@ivar _deferred: the L{Deferred} object that will be fired when the result
arrives.
@type _deferred: L{Deferred}
@ivar command: name of the command sent to the server.
@type command: C{str}
"""
def __init__(self, command, **kwargs):
"""
Create a command.
@param command: the name of the command.
@type command: C{str}
        @param kwargs: these values will be stored as attributes of the object
for future use
"""
self.command = command
self._deferred = Deferred()
for k, v in kwargs.items():
setattr(self, k, v)
def success(self, value):
"""
Shortcut method to fire the underlying deferred.
"""
self._deferred.callback(value)
def fail(self, error):
"""
        Make the underlying deferred fail.
"""
self._deferred.errback(error)
class MemCacheProtocol(LineReceiver, TimeoutMixin):
"""
MemCache protocol: connect to a memcached server to store/retrieve values.
@ivar persistentTimeOut: the timeout period used to wait for a response.
@type persistentTimeOut: C{int}
@ivar _current: current list of requests waiting for an answer from the
server.
@type _current: C{deque} of L{Command}
@ivar _lenExpected: amount of data expected in raw mode, when reading for
a value.
@type _lenExpected: C{int}
@ivar _getBuffer: current buffer of data, used to store temporary data
when reading in raw mode.
@type _getBuffer: C{list}
@ivar _bufferLength: the total amount of bytes in C{_getBuffer}.
@type _bufferLength: C{int}
@ivar _disconnected: indicate if the connectionLost has been called or not.
@type _disconnected: C{bool}
"""
MAX_KEY_LENGTH = 250
_disconnected = False
def __init__(self, timeOut=60):
"""
Create the protocol.
@param timeOut: the timeout to wait before detecting that the
connection is dead and close it. It's expressed in seconds.
@type timeOut: C{int}
"""
self._current = deque()
self._lenExpected = None
self._getBuffer = None
self._bufferLength = None
self.persistentTimeOut = self.timeOut = timeOut
def _cancelCommands(self, reason):
"""
Cancel all the outstanding commands, making them fail with C{reason}.
"""
while self._current:
cmd = self._current.popleft()
cmd.fail(reason)
def timeoutConnection(self):
"""
Close the connection in case of timeout.
"""
self._cancelCommands(TimeoutError("Connection timeout"))
self.transport.loseConnection()
def connectionLost(self, reason):
"""
Cause any outstanding commands to fail.
"""
self._disconnected = True
self._cancelCommands(reason)
LineReceiver.connectionLost(self, reason)
def sendLine(self, line):
"""
Override sendLine to add a timeout to response.
"""
if not self._current:
self.setTimeout(self.persistentTimeOut)
LineReceiver.sendLine(self, line)
def rawDataReceived(self, data):
"""
Collect data for a get.
"""
self.resetTimeout()
self._getBuffer.append(data)
self._bufferLength += len(data)
if self._bufferLength >= self._lenExpected + 2:
data = "".join(self._getBuffer)
buf = data[:self._lenExpected]
rem = data[self._lenExpected + 2:]
val = buf
self._lenExpected = None
self._getBuffer = None
self._bufferLength = None
cmd = self._current[0]
if cmd.multiple:
flags, cas = cmd.values[cmd.currentKey]
cmd.values[cmd.currentKey] = (flags, cas, val)
else:
cmd.value = val
self.setLineMode(rem)
def cmd_STORED(self):
"""
Manage a success response to a set operation.
"""
self._current.popleft().success(True)
def cmd_NOT_STORED(self):
"""
Manage a specific 'not stored' response to a set operation: this is not
an error, but some condition wasn't met.
"""
self._current.popleft().success(False)
def cmd_END(self):
"""
        This is the end token of a get or a stat operation.
"""
cmd = self._current.popleft()
if cmd.command == "get":
if cmd.multiple:
values = dict([(key, val[::2]) for key, val in
cmd.values.iteritems()])
cmd.success(values)
else:
cmd.success((cmd.flags, cmd.value))
elif cmd.command == "gets":
if cmd.multiple:
cmd.success(cmd.values)
else:
cmd.success((cmd.flags, cmd.cas, cmd.value))
elif cmd.command == "stats":
cmd.success(cmd.values)
def cmd_NOT_FOUND(self):
"""
Manage error response for incr/decr/delete.
"""
self._current.popleft().success(False)
def cmd_VALUE(self, line):
"""
Prepare the reading a value after a get.
"""
cmd = self._current[0]
if cmd.command == "get":
key, flags, length = line.split()
cas = ""
else:
key, flags, length, cas = line.split()
self._lenExpected = int(length)
self._getBuffer = []
self._bufferLength = 0
if cmd.multiple:
if key not in cmd.keys:
raise RuntimeError("Unexpected commands answer.")
cmd.currentKey = key
cmd.values[key] = [int(flags), cas]
else:
if cmd.key != key:
raise RuntimeError("Unexpected commands answer.")
cmd.flags = int(flags)
cmd.cas = cas
self.setRawMode()
def cmd_STAT(self, line):
"""
Reception of one stat line.
"""
cmd = self._current[0]
key, val = line.split(" ", 1)
cmd.values[key] = val
def cmd_VERSION(self, versionData):
"""
Read version token.
"""
self._current.popleft().success(versionData)
def cmd_ERROR(self):
"""
        A non-existent command has been sent.
"""
log.err("Non-existent command sent.")
cmd = self._current.popleft()
cmd.fail(NoSuchCommand())
def cmd_CLIENT_ERROR(self, errText):
"""
        Invalid input has been sent.
"""
log.err("Invalid input: %s" % (errText,))
cmd = self._current.popleft()
cmd.fail(ClientError(errText))
def cmd_SERVER_ERROR(self, errText):
"""
An error has happened server-side.
"""
log.err("Server error: %s" % (errText,))
cmd = self._current.popleft()
cmd.fail(ServerError(errText))
def cmd_DELETED(self):
"""
A delete command has completed successfully.
"""
self._current.popleft().success(True)
def cmd_OK(self):
"""
The last command has been completed.
"""
self._current.popleft().success(True)
def cmd_EXISTS(self):
"""
A C{checkAndSet} update has failed.
"""
self._current.popleft().success(False)
def lineReceived(self, line):
"""
Receive line commands from the server.
"""
self.resetTimeout()
token = line.split(" ", 1)[0]
# First manage standard commands without space
cmd = getattr(self, "cmd_%s" % (token,), None)
if cmd is not None:
args = line.split(" ", 1)[1:]
if args:
cmd(args[0])
else:
cmd()
else:
# Then manage commands with space in it
line = line.replace(" ", "_")
cmd = getattr(self, "cmd_%s" % (line,), None)
if cmd is not None:
cmd()
else:
# Increment/Decrement response
cmd = self._current.popleft()
val = int(line)
cmd.success(val)
if not self._current:
# No pending request, remove timeout
self.setTimeout(None)
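    # Dispatch sketch: "STORED" -> cmd_STORED(); "VALUE key 0 5" ->
    # cmd_VALUE("key 0 5"); "NOT FOUND" -> cmd_NOT_FOUND() via the
    # space-to-underscore fallback; a bare integer line is an incr/decr result.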
def increment(self, key, val=1):
"""
Increment the value of C{key} by given value (default to 1).
C{key} must be consistent with an int. Return the new value.
@param key: the key to modify.
@type key: C{str}
@param val: the value to increment.
@type val: C{int}
        @return: a deferred which will be called back with the new value
associated with the key (after the increment).
@rtype: L{Deferred}
"""
return self._incrdecr("incr", key, val)
def decrement(self, key, val=1):
"""
Decrement the value of C{key} by given value (default to 1).
C{key} must be consistent with an int. Return the new value, coerced to
0 if negative.
@param key: the key to modify.
@type key: C{str}
@param val: the value to decrement.
@type val: C{int}
        @return: a deferred which will be called back with the new value
associated with the key (after the decrement).
@rtype: L{Deferred}
"""
return self._incrdecr("decr", key, val)
def _incrdecr(self, cmd, key, val):
"""
Internal wrapper for incr/decr.
"""
if self._disconnected:
return fail(RuntimeError("not connected"))
if not isinstance(key, str):
return fail(ClientError(
"Invalid type for key: %s, expecting a string" % (type(key),)))
if len(key) > self.MAX_KEY_LENGTH:
return fail(ClientError("Key too long"))
fullcmd = "%s %s %d" % (cmd, key, int(val))
self.sendLine(fullcmd)
cmdObj = Command(cmd, key=key)
self._current.append(cmdObj)
return cmdObj._deferred
def replace(self, key, val, flags=0, expireTime=0):
"""
Replace the given C{key}. It must already exist in the server.
@param key: the key to replace.
@type key: C{str}
@param val: the new value associated with the key.
@type val: C{str}
@param flags: the flags to store with the key.
@type flags: C{int}
@param expireTime: if different from 0, the relative time in seconds
when the key will be deleted from the store.
@type expireTime: C{int}
@return: a deferred that will fire with C{True} if the operation has
            succeeded, and C{False} if the key didn't previously exist.
@rtype: L{Deferred}
"""
return self._set("replace", key, val, flags, expireTime, "")
def add(self, key, val, flags=0, expireTime=0):
"""
Add the given C{key}. It must not exist in the server.
@param key: the key to add.
@type key: C{str}
@param val: the value associated with the key.
@type val: C{str}
@param flags: the flags to store with the key.
@type flags: C{int}
@param expireTime: if different from 0, the relative time in seconds
when the key will be deleted from the store.
@type expireTime: C{int}
@return: a deferred that will fire with C{True} if the operation has
            succeeded, and C{False} if the key already exists.
@rtype: L{Deferred}
"""
return self._set("add", key, val, flags, expireTime, "")
def set(self, key, val, flags=0, expireTime=0):
"""
Set the given C{key}.
@param key: the key to set.
@type key: C{str}
@param val: the value associated with the key.
@type val: C{str}
@param flags: the flags to store with the key.
@type flags: C{int}
@param expireTime: if different from 0, the relative time in seconds
when the key will be deleted from the store.
@type expireTime: C{int}
@return: a deferred that will fire with C{True} if the operation has
succeeded.
@rtype: L{Deferred}
"""
return self._set("set", key, val, flags, expireTime, "")
def checkAndSet(self, key, val, cas, flags=0, expireTime=0):
"""
Change the content of C{key} only if the C{cas} value matches the
current one associated with the key. Use this to store a value which
hasn't been modified since last time you fetched it.
@param key: The key to set.
@type key: C{str}
@param val: The value associated with the key.
@type val: C{str}
@param cas: Unique 64-bit value returned by previous call of C{get}.
@type cas: C{str}
@param flags: The flags to store with the key.
@type flags: C{int}
@param expireTime: If different from 0, the relative time in seconds
when the key will be deleted from the store.
@type expireTime: C{int}
@return: A deferred that will fire with C{True} if the operation has
succeeded, C{False} otherwise.
@rtype: L{Deferred}
"""
return self._set("cas", key, val, flags, expireTime, cas)
def _set(self, cmd, key, val, flags, expireTime, cas):
"""
Internal wrapper for setting values.
"""
if self._disconnected:
return fail(RuntimeError("not connected"))
if not isinstance(key, str):
return fail(ClientError(
"Invalid type for key: %s, expecting a string" % (type(key),)))
if len(key) > self.MAX_KEY_LENGTH:
return fail(ClientError("Key too long"))
if not isinstance(val, str):
return fail(ClientError(
"Invalid type for value: %s, expecting a string" %
(type(val),)))
if cas:
cas = " " + cas
length = len(val)
fullcmd = "%s %s %d %d %d%s" % (
cmd, key, flags, expireTime, length, cas)
self.sendLine(fullcmd)
self.sendLine(val)
cmdObj = Command(cmd, key=key, flags=flags, length=length)
self._current.append(cmdObj)
return cmdObj._deferred
def append(self, key, val):
"""
Append given data to the value of an existing key.
@param key: The key to modify.
@type key: C{str}
@param val: The value to append to the current value associated with
the key.
@type val: C{str}
@return: A deferred that will fire with C{True} if the operation has
succeeded, C{False} otherwise.
@rtype: L{Deferred}
"""
# Even if flags and expTime values are ignored, we have to pass them
return self._set("append", key, val, 0, 0, "")
def prepend(self, key, val):
"""
Prepend given data to the value of an existing key.
@param key: The key to modify.
@type key: C{str}
@param val: The value to prepend to the current value associated with
the key.
@type val: C{str}
@return: A deferred that will fire with C{True} if the operation has
succeeded, C{False} otherwise.
@rtype: L{Deferred}
"""
# Even if flags and expTime values are ignored, we have to pass them
return self._set("prepend", key, val, 0, 0, "")
def get(self, key, withIdentifier=False):
"""
Get the given C{key}. It doesn't support multiple keys. If
C{withIdentifier} is set to C{True}, the command issued is a C{gets},
that will return the current identifier associated with the value. This
identifier has to be used when issuing C{checkAndSet} update later,
using the corresponding method.
@param key: The key to retrieve.
@type key: C{str}
@param withIdentifier: If set to C{True}, retrieve the current
identifier along with the value and the flags.
@type withIdentifier: C{bool}
@return: A deferred that will fire with the tuple (flags, value) if
C{withIdentifier} is C{False}, or (flags, cas identifier, value)
if C{True}. If the server indicates there is no value
associated with C{key}, the returned value will be C{None} and
the returned flags will be C{0}.
@rtype: L{Deferred}
"""
return self._get([key], withIdentifier, False)
def getMultiple(self, keys, withIdentifier=False):
"""
Get the given list of C{keys}. If C{withIdentifier} is set to C{True},
the command issued is a C{gets}, that will return the identifiers
associated with each values. This identifier has to be used when
issuing C{checkAndSet} update later, using the corresponding method.
@param keys: The keys to retrieve.
@type keys: C{list} of C{str}
@param withIdentifier: If set to C{True}, retrieve the identifiers
along with the values and the flags.
@type withIdentifier: C{bool}
@return: A deferred that will fire with a dictionary with the elements
of C{keys} as keys and the tuples (flags, value) as values if
C{withIdentifier} is C{False}, or (flags, cas identifier, value) if
C{True}. If the server indicates there is no value associated with
C{key}, the returned values will be C{None} and the returned flags
will be C{0}.
@rtype: L{Deferred}
@since: 9.0
"""
return self._get(keys, withIdentifier, True)
def _get(self, keys, withIdentifier, multiple):
"""
Helper method for C{get} and C{getMultiple}.
"""
if self._disconnected:
return fail(RuntimeError("not connected"))
for key in keys:
if not isinstance(key, str):
return fail(ClientError(
"Invalid type for key: %s, expecting a string" % (type(key),)))
if len(key) > self.MAX_KEY_LENGTH:
return fail(ClientError("Key too long"))
if withIdentifier:
cmd = "gets"
else:
cmd = "get"
fullcmd = "%s %s" % (cmd, " ".join(keys))
self.sendLine(fullcmd)
if multiple:
values = dict([(key, (0, "", None)) for key in keys])
cmdObj = Command(cmd, keys=keys, values=values, multiple=True)
else:
cmdObj = Command(cmd, key=keys[0], value=None, flags=0, cas="",
multiple=False)
self._current.append(cmdObj)
return cmdObj._deferred
def stats(self, arg=None):
"""
Get some stats from the server. It will be available as a dict.
@param arg: An optional additional string which will be sent along
with the I{stats} command. The interpretation of this value by
the server is left undefined by the memcache protocol
specification.
@type arg: L{NoneType} or L{str}
@return: a deferred that will fire with a C{dict} of the available
statistics.
@rtype: L{Deferred}
"""
if arg:
cmd = "stats " + arg
else:
cmd = "stats"
if self._disconnected:
return fail(RuntimeError("not connected"))
self.sendLine(cmd)
cmdObj = Command("stats", values={})
self._current.append(cmdObj)
return cmdObj._deferred
def version(self):
"""
Get the version of the server.
@return: a deferred that will fire with the string value of the
version.
@rtype: L{Deferred}
"""
if self._disconnected:
return fail(RuntimeError("not connected"))
self.sendLine("version")
cmdObj = Command("version")
self._current.append(cmdObj)
return cmdObj._deferred
def delete(self, key):
"""
Delete an existing C{key}.
@param key: the key to delete.
@type key: C{str}
@return: a deferred that will be called back with C{True} if the key
was successfully deleted, or C{False} if not.
@rtype: L{Deferred}
"""
if self._disconnected:
return fail(RuntimeError("not connected"))
if not isinstance(key, str):
return fail(ClientError(
"Invalid type for key: %s, expecting a string" % (type(key),)))
self.sendLine("delete %s" % key)
cmdObj = Command("delete", key=key)
self._current.append(cmdObj)
return cmdObj._deferred
def flushAll(self):
"""
Flush all cached values.
@return: a deferred that will be called back with C{True} when the
operation has succeeded.
@rtype: L{Deferred}
"""
if self._disconnected:
return fail(RuntimeError("not connected"))
self.sendLine("flush_all")
cmdObj = Command("flush_all")
self._current.append(cmdObj)
return cmdObj._deferred
__all__ = ["MemCacheProtocol", "DEFAULT_PORT", "NoSuchCommand", "ClientError",
"ServerError"]
|
apache-2.0
|
barseghyanartur/django-extensions
|
django_extensions/management/commands/passwd.py
|
27
|
1306
|
import getpass
from django.core.management.base import BaseCommand, CommandError
from django_extensions.compat import get_user_model
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
help = "Clone of the UNIX program ``passwd'', for django.contrib.auth."
requires_system_checks = False
@signalcommand
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("need exactly one or zero arguments for username")
if args:
username, = args
else:
username = getpass.getuser()
User = get_user_model()
try:
u = User.objects.get(username=username)
except User.DoesNotExist:
raise CommandError("user %s does not exist" % username)
print("Changing password for user: %s" % u.username)
p1 = p2 = ""
while "" in (p1, p2) or p1 != p2:
p1 = getpass.getpass()
p2 = getpass.getpass("Password (again): ")
if p1 != p2:
print("Passwords do not match, try again")
elif "" in (p1, p2):
raise CommandError("aborted")
u.set_password(p1)
u.save()
return "Password changed successfully for user %s\n" % u.username
|
mit
|
mikemow/youtube-dl
|
youtube_dl/extractor/wdr.py
|
110
|
10166
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
from ..utils import (
determine_ext,
unified_strdate,
)
class WDRIE(InfoExtractor):
_PLAYER_REGEX = '-(?:video|audio)player(?:_size-[LMS])?'
_VALID_URL = r'(?P<url>https?://www\d?\.(?:wdr\d?|funkhauseuropa)\.de/)(?P<id>.+?)(?P<player>%s)?\.html' % _PLAYER_REGEX
_TESTS = [
{
'url': 'http://www1.wdr.de/mediathek/video/sendungen/servicezeit/videoservicezeit560-videoplayer_size-L.html',
'info_dict': {
'id': 'mdb-362427',
'ext': 'flv',
'title': 'Servicezeit',
'description': 'md5:c8f43e5e815eeb54d0b96df2fba906cb',
'upload_date': '20140310',
'is_live': False
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www1.wdr.de/themen/av/videomargaspiegelisttot101-videoplayer.html',
'info_dict': {
'id': 'mdb-363194',
'ext': 'flv',
'title': 'Marga Spiegel ist tot',
'description': 'md5:2309992a6716c347891c045be50992e4',
'upload_date': '20140311',
'is_live': False
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://www1.wdr.de/themen/kultur/audioerlebtegeschichtenmargaspiegel100-audioplayer.html',
'md5': '83e9e8fefad36f357278759870805898',
'info_dict': {
'id': 'mdb-194332',
'ext': 'mp3',
'title': 'Erlebte Geschichten: Marga Spiegel (29.11.2009)',
'description': 'md5:2309992a6716c347891c045be50992e4',
'upload_date': '20091129',
'is_live': False
},
},
{
'url': 'http://www.funkhauseuropa.de/av/audioflaviacoelhoamaramar100-audioplayer.html',
'md5': '99a1443ff29af19f6c52cf6f4dc1f4aa',
'info_dict': {
'id': 'mdb-478135',
'ext': 'mp3',
'title': 'Flavia Coelho: Amar é Amar',
'description': 'md5:7b29e97e10dfb6e265238b32fa35b23a',
'upload_date': '20140717',
'is_live': False
},
},
{
'url': 'http://www1.wdr.de/mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100.html',
'playlist_mincount': 146,
'info_dict': {
'id': 'mediathek/video/sendungen/quarks_und_co/filterseite-quarks-und-co100',
}
},
{
'url': 'http://www1.wdr.de/mediathek/video/livestream/index.html',
'info_dict': {
'id': 'mdb-103364',
'title': 're:^WDR Fernsehen [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:ae2ff888510623bf8d4b115f95a9b7c9',
'ext': 'flv',
'upload_date': '20150212',
'is_live': True
},
'params': {
'skip_download': True,
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_url = mobj.group('url')
page_id = mobj.group('id')
webpage = self._download_webpage(url, page_id)
if mobj.group('player') is None:
entries = [
self.url_result(page_url + href, 'WDR')
for href in re.findall(r'<a href="/?(.+?%s\.html)" rel="nofollow"' % self._PLAYER_REGEX, webpage)
]
if entries: # Playlist page
return self.playlist_result(entries, page_id)
# Overview page
entries = []
for page_num in itertools.count(2):
hrefs = re.findall(
r'<li class="mediathekvideo"\s*>\s*<img[^>]*>\s*<a href="(/mediathek/video/[^"]+)"',
webpage)
entries.extend(
self.url_result(page_url + href, 'WDR')
for href in hrefs)
next_url_m = re.search(
r'<li class="nextToLast">\s*<a href="([^"]+)"', webpage)
if not next_url_m:
break
next_url = page_url + next_url_m.group(1)
webpage = self._download_webpage(
next_url, page_id,
note='Downloading playlist page %d' % page_num)
return self.playlist_result(entries, page_id)
flashvars = compat_parse_qs(
self._html_search_regex(r'<param name="flashvars" value="([^"]+)"', webpage, 'flashvars'))
page_id = flashvars['trackerClipId'][0]
video_url = flashvars['dslSrc'][0]
title = flashvars['trackerClipTitle'][0]
thumbnail = flashvars['startPicture'][0] if 'startPicture' in flashvars else None
is_live = flashvars.get('isLive', ['0'])[0] == '1'
if is_live:
title = self._live_title(title)
if 'trackerClipAirTime' in flashvars:
upload_date = flashvars['trackerClipAirTime'][0]
else:
upload_date = self._html_search_meta('DC.Date', webpage, 'upload date')
if upload_date:
upload_date = unified_strdate(upload_date)
if video_url.endswith('.f4m'):
video_url += '?hdcore=3.2.0&plugin=aasp-3.2.0.77.18'
ext = 'flv'
elif video_url.endswith('.smil'):
fmt = self._extract_smil_formats(video_url, page_id)[0]
video_url = fmt['url']
sep = '&' if '?' in video_url else '?'
video_url += sep
video_url += 'hdcore=3.3.0&plugin=aasp-3.3.0.99.43'
ext = fmt['ext']
else:
ext = determine_ext(video_url)
description = self._html_search_meta('Description', webpage, 'description')
return {
'id': page_id,
'url': video_url,
'ext': ext,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'is_live': is_live
}
class WDRMobileIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://mobile-ondemand\.wdr\.de/
.*?/fsk(?P<age_limit>[0-9]+)
/[0-9]+/[0-9]+/
(?P<id>[0-9]+)_(?P<title>[0-9]+)'''
IE_NAME = 'wdr:mobile'
_TEST = {
'url': 'http://mobile-ondemand.wdr.de/CMS2010/mdb/ondemand/weltweit/fsk0/42/421735/421735_4283021.mp4',
'info_dict': {
'title': '4283021',
'id': '421735',
'ext': 'mp4',
'age_limit': 0,
},
'skip': 'Problems with loading data.'
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
return {
'id': mobj.group('id'),
'title': mobj.group('title'),
'age_limit': int(mobj.group('age_limit')),
'url': url,
'http_headers': {
'User-Agent': 'mobile',
},
}
class WDRMausIE(InfoExtractor):
_VALID_URL = 'http://(?:www\.)?wdrmaus\.de/(?:[^/]+/){,2}(?P<id>[^/?#]+)(?:/index\.php5|(?<!index)\.php5|/(?:$|[?#]))'
IE_DESC = 'Sendung mit der Maus'
_TESTS = [{
'url': 'http://www.wdrmaus.de/aktuelle-sendung/index.php5',
'info_dict': {
'id': 'aktuelle-sendung',
'ext': 'mp4',
'thumbnail': 're:^http://.+\.jpg',
'upload_date': 're:^[0-9]{8}$',
'title': 're:^[0-9.]{10} - Aktuelle Sendung$',
}
}, {
'url': 'http://www.wdrmaus.de/sachgeschichten/sachgeschichten/40_jahre_maus.php5',
'md5': '3b1227ca3ed28d73ec5737c65743b2a3',
'info_dict': {
'id': '40_jahre_maus',
'ext': 'mp4',
'thumbnail': 're:^http://.+\.jpg',
'upload_date': '20131007',
'title': '12.03.2011 - 40 Jahre Maus',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
param_code = self._html_search_regex(
r'<a href="\?startVideo=1&([^"]+)"', webpage, 'parameters')
title_date = self._search_regex(
r'<div class="sendedatum"><p>Sendedatum:\s*([0-9\.]+)</p>',
webpage, 'air date')
title_str = self._html_search_regex(
r'<h1>(.*?)</h1>', webpage, 'title')
title = '%s - %s' % (title_date, title_str)
upload_date = unified_strdate(
self._html_search_meta('dc.date', webpage))
fields = compat_parse_qs(param_code)
video_url = fields['firstVideo'][0]
thumbnail = compat_urlparse.urljoin(url, fields['startPicture'][0])
formats = [{
'format_id': 'rtmp',
'url': video_url,
}]
jscode = self._download_webpage(
'http://www.wdrmaus.de/codebase/js/extended-medien.min.js',
video_id, fatal=False,
note='Downloading URL translation table',
errnote='Could not download URL translation table')
if jscode:
for m in re.finditer(
r"stream:\s*'dslSrc=(?P<stream>[^']+)',\s*download:\s*'(?P<dl>[^']+)'\s*\}",
jscode):
if video_url.startswith(m.group('stream')):
http_url = video_url.replace(
m.group('stream'), m.group('dl'))
formats.append({
'format_id': 'http',
'url': http_url,
})
break
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'upload_date': upload_date,
}
|
unlicense
|
makson96/Recultis
|
games/doom3/game.py
|
2
|
2138
|
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
##This software is available to you under the terms of the GPL-3, see "/usr/share/common-licenses/GPL-3".
##Copyright:
##- Tomasz Makarewicz ([email protected])
import os, shutil
from subprocess import check_output
recultis_dir = os.getenv("HOME") + "/.recultis/"
self_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
install_dir = recultis_dir + "doom3/"
desk_dir = str(check_output(['xdg-user-dir', 'DESKTOP']))[2:-3]
full_name = "Doom 3 BFG on RBDOOM-3-BFG engine"
description = """Doom 3: BFG is the remaster of classic Doom 3 with all expansions. It
features enhanced graphic and audio to original game. Doom 3 is one of
the best FPS games of all time. Unfortunately, it was never released
on Linux, but game engine was release open source. With many
enhancements and new features, game is now available on Linux and it
is better than ever before. Recultis uses RBDOOM-3-BFG flavor of the
engine and requires game to be present in your Steam Library.
"""
shops = ["steam"]
s_appid = "208200"
steam_link = "http://store.steampowered.com/app/"+s_appid+"/"
screenshot_path = self_dir + "../../assets/html/rbdoom3-screen.png"
icon1_name = "rbdoom-3-bfg.png"
icon_list = [icon1_name]
engine = "rbdoom-3-bfg"
runtime_version = 2
env_var = "LD_LIBRARY_PATH=$HOME/.recultis/runtime/recultis" + str(runtime_version) + ":$HOME/.recultis/runtime/recultis" + str(runtime_version) + "/custom"
launcher1_cmd = "bash -c 'cd $HOME/.recultis/doom3/; " + env_var + " ./RBDoom3BFG'"
launcher_cmd_list = [["Doom3 BFG", launcher1_cmd]]
launcher1_text = """[Desktop Entry]
Type=Application
Name=Doom 3 BFG
Comment=Play Doom 3 BFG
Exec=""" + launcher1_cmd + """
Icon=""" + icon1_name + """
Categories=Game;
Terminal=false
"""
launcher_list = [["doom3.desktop", launcher1_text]]
uninstall_files_list = []
uninstall_dir_list = []
def prepare_engine():
print("Prepare game engine")
try:
os.remove(install_dir + "RBDoom3BFG")
shutil.rmtree(install_dir + "lib")
except OSError:
# Nothing to clean up on a fresh install.
pass
shutil.copy(recultis_dir + "tmp/rbdoom-3-bfg/RBDoom3BFG", install_dir + "RBDoom3BFG")
print("Game engine ready")
|
gpl-3.0
|
Daniel-CA/odoomrp-wip-public
|
stock_picking_package_info/models/stock_quant.py
|
16
|
4961
|
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api
class StockQuant(models.Model):
_inherit = 'stock.quant'
@api.multi
@api.depends('product_id', 'qty', 'product_id.weight',
'product_id.weight_net', 'product_id.volume')
def _compute_totals(self):
for record in self:
record.total_weight = record.product_id.weight * record.qty
record.total_weight_net = record.product_id.weight_net * record.qty
record.total_volume = record.product_id.volume * record.qty
weight = fields.Float(string='Weight', related='product_id.weight')
weight_net = fields.Float(
string='Net Weight', related='product_id.weight_net')
volume = fields.Float(
string='Volume', related='product_id.volume')
total_weight = fields.Float(
string='Total weight', compute='_compute_totals')
total_weight_net = fields.Float(
string='Total Net weight', compute='_compute_totals')
total_volume = fields.Float(
string='Total Volume', compute='_compute_totals')
class StockQuantPackage(models.Model):
_inherit = 'stock.quant.package'
@api.multi
@api.depends('height', 'width', 'length', 'quant_ids',
'quant_ids.total_weight', 'quant_ids.total_weight_net',
'quant_ids.total_volume', 'children_ids',
'children_ids.total_weight', 'children_ids.empty_weight',
'children_ids.total_weight_net', 'children_ids.total_volume')
def _compute_totals(self):
for record in self:
record.total_weight = sum(x.total_weight for x in record.quant_ids)
record.total_weight_net = sum(x.total_weight_net for x in
record.quant_ids)
record.total_volume = sum(x.total_volume for x in record.quant_ids)
record.total_estim_weight = (
record.total_weight + record.empty_weight +
sum(x.total_weight + x.empty_weight for x in
record.children_ids))
record.real_weight = record.total_estim_weight
record.total_estim_weight_net = (
record.total_weight_net + record.empty_weight +
sum(x.total_weight_net + x.empty_weight for x in
record.children_ids))
record.tvolume_charge = (
record.total_volume + sum(x.total_volume for x in
record.children_ids))
record.permitted_volume = (
record.height * record.width * record.length)
@api.multi
def _compute_pickings(self):
for record in self:
record.pickings = self.env['stock.pack.operation'].search(
[('result_package_id', '=', record.id)]).mapped('picking_id')
height = fields.Float(string='Height', help='The height of the package')
width = fields.Float(string='Width', help='The width of the package')
length = fields.Float(string='Length', help='The length of the package')
empty_weight = fields.Float(string='Empty Package Weight')
real_weight = fields.Float(string='Real Weight')
total_weight = fields.Float(
string='Total Weight', compute='_compute_totals')
total_weight_net = fields.Float(
string='Total Net Weight', compute='_compute_totals')
total_volume = fields.Float(
string='Total Volume', compute='_compute_totals')
total_estim_weight = fields.Float(
string='Total Estimated Weight', compute='_compute_totals')
total_estim_weight_net = fields.Float(
string='Total Estimated Net Weight', compute='_compute_totals')
permitted_volume = fields.Float(
string='Permitted volume', compute='_compute_totals')
tvolume_charge = fields.Float(
string='Total volume charge', compute='_compute_totals')
pickings = fields.Many2many(
comodel_name='stock.picking', string='Pickings',
compute='_compute_pickings')
@api.multi
@api.onchange('ul_id')
def onchange_ul_id(self):
for record in self:
record.height = record.ul_id.height
record.width = record.ul_id.width
record.length = record.ul_id.length
record.empty_weight = record.ul_id.weight
@api.model
def create(self, values):
if values.get('ul_id', False):
ul = self.env['product.ul'].browse(values.get('ul_id'))
values.update({
'height': ul.height,
'width': ul.width,
'length': ul.length,
'empty_weight': ul.weight,
})
return super(StockQuantPackage, self).create(values)
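# Usage sketch (hypothetical ids): creating a package with a logistic unit
# copies the unit's dimensions and empty weight onto the new package.
#
# >>> ul = env['product.ul'].browse(1)
# >>> pack = env['stock.quant.package'].create({'ul_id': ul.id})
# >>> assert pack.empty_weight == ul.weight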
|
agpl-3.0
|
MangoMangoDevelopment/neptune
|
lib/ros_comm-1.12.0/tools/rosconsole/scripts/generate_speed_test.py
|
1
|
5464
|
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
f = open('%s/../test/speed_test.cpp' % (os.path.dirname(__file__)), 'w')
f.write("// !!!!!!!!!!!!!!!!!!!!!!! This is a generated file, do not edit manually\n\n")
f.write('/*\n')
f.write(' * Copyright (c) 2008, Willow Garage, Inc.\n')
f.write(' * All rights reserved.\n')
f.write(' *\n')
f.write(' * Redistribution and use in source and binary forms, with or without\n')
f.write(' * modification, are permitted provided that the following conditions are met:\n')
f.write(' *\n')
f.write(' * * Redistributions of source code must retain the above copyright\n')
f.write(' * notice, this list of conditions and the following disclaimer.\n')
f.write(' * * Redistributions in binary form must reproduce the above copyright\n')
f.write(' * notice, this list of conditions and the following disclaimer in the\n')
f.write(' * documentation and/or other materials provided with the distribution.\n')
f.write(' * * Neither the name of Willow Garage, Inc. nor the names of its\n')
f.write(' * contributors may be used to endorse or promote products derived from\n')
f.write(' * this software without specific prior written permission.\n')
f.write(' *\n')
f.write(' * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"\n')
f.write(' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n')
f.write(' * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n')
f.write(' * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n')
f.write(' * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n')
f.write(' * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n')
f.write(' * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n')
f.write(' * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n')
f.write(' * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n')
f.write(' * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n')
f.write(' * POSSIBILITY OF SUCH DAMAGE.\n')
f.write(' */\n\n')
f.write('#include "ros/console.h"\n')
f.write('#include "log4cxx/appenderskeleton.h"\n')
#for i in range(0,int(sys.argv[1])):
# f.write('void info%s(int i) { ROS_INFO("Info%s: %%d", i); }\n' %(i,i))
# f.write('void warn%s(int i) { ROS_WARN("Warn%s: %%d", i); }\n' %(i,i))
# f.write('void error%s(int i) { ROS_ERROR("Error%s: %%d", i); }\n' %(i,i))
# f.write('void debug%s(int i) { ROS_DEBUG("Debug%s: %%d", i); }\n' %(i,i))
# f.write('void errorr%s(int i) { ROS_ERROR("Error2%s: %%d", i); }\n' %(i,i))
f.write('class NullAppender : public log4cxx::AppenderSkeleton {\n')
f.write('protected:\n')
f.write('virtual void append(const log4cxx::spi::LoggingEventPtr& event, log4cxx::helpers::Pool& pool){printf("blah\\n");}\n')
f.write('virtual void close() {}\n')
f.write('virtual bool requiresLayout() const { return false; } };\n')
f.write('int main(int argc, char** argv)\n{\n')
f.write('ROSCONSOLE_AUTOINIT; \nlog4cxx::Logger::getLogger(ROSCONSOLE_ROOT_LOGGER_NAME)->removeAllAppenders();\n')
f.write('log4cxx::Logger::getLogger(ROSCONSOLE_DEFAULT_NAME)->addAppender(new NullAppender);\n')
f.write('log4cxx::Logger::getLogger(ROSCONSOLE_DEFAULT_NAME)->setLevel(log4cxx::Level::getFatal());\n')
f.write('for (int i = 0;i < %s; ++i)\n{\n' %(sys.argv[2]))
for i in range(0,int(sys.argv[1])):
#f.write('info%s(i);\n' %(i))
#f.write('warn%s(i);\n' %(i))
#f.write('error%s(i);\n' %(i))
#f.write('debug%s(i);\n' %(i))
#f.write('errorr%s(i);\n' %(i))
f.write('ROS_INFO("test");')
f.write('ROS_WARN("test");')
f.write('ROS_ERROR("test");')
f.write('ROS_DEBUG("test");')
f.write('ROS_ERROR("test");')
f.write('}\n')
f.write('}\n')
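# Example invocation (inferred from the sys.argv usage above): argv[1] sets
# how many blocks of log statements are emitted per loop iteration, argv[2]
# the iteration count compiled into the generated test, e.g.:
# python generate_speed_test.py 1 1000000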
|
bsd-3-clause
|
12019/python-gsmmodem
|
gsmmodem/exceptions.py
|
13
|
4484
|
""" Module defines exceptions used by gsmmodem """
class GsmModemException(Exception):
""" Base exception raised for error conditions when interacting with the GSM modem """
class TimeoutException(GsmModemException):
""" Raised when a write command times out """
def __init__(self, data=None):
""" @param data: Any data that was read was read before timeout occurred (if applicable) """
super(TimeoutException, self).__init__(data)
self.data = data
class InvalidStateException(GsmModemException):
""" Raised when an API method call is invoked on an object that is in an incorrect state """
class InterruptedException(InvalidStateException):
""" Raised when execution of an AT command is interrupt by a state change.
May contain another exception that was the cause of the interruption """
def __init__(self, message, cause=None):
""" @param cause: the exception that caused this interruption (usually a CmeError) """
super(InterruptedException, self).__init__(message)
self.cause = cause
class CommandError(GsmModemException):
""" Raised if the modem returns an error in response to an AT command
May optionally include an error type (CME or CMS) and -code (error-specific).
"""
_description = ''
def __init__(self, command=None, type=None, code=None):
self.command = command
self.type = type
self.code = code
if type is not None and code is not None:
super(CommandError, self).__init__('{0} {1}{2}'.format(type, code, ' ({0})'.format(self._description) if len(self._description) > 0 else ''))
elif command is not None:
super(CommandError, self).__init__(command)
else:
super(CommandError, self).__init__()
class CmeError(CommandError):
""" ME error result code : +CME ERROR: <error>
Issued in response to an AT command
"""
def __new__(cls, *args, **kwargs):
# Return a specialized version of this class if possible
if len(args) >= 2:
code = args[1]
if code == 11:
return PinRequiredError(args[0])
elif code == 16:
return IncorrectPinError(args[0])
elif code == 12:
return PukRequiredError(args[0])
return super(CmeError, cls).__new__(cls, *args, **kwargs)
def __init__(self, command, code):
super(CmeError, self).__init__(command, 'CME', code)
class SecurityException(CmeError):
""" Security-related CME error """
def __init__(self, command, code):
super(SecurityException, self).__init__(command, code)
class PinRequiredError(SecurityException):
""" Raised if an operation failed because the SIM card's PIN has not been entered """
_description = 'SIM card PIN is required'
def __init__(self, command, code=11):
super(PinRequiredError, self).__init__(command, code)
class IncorrectPinError(SecurityException):
""" Raised if an incorrect PIN is entered """
_description = 'Incorrect PIN entered'
def __init__(self, command, code=16):
super(IncorrectPinError, self).__init__(command, code)
class PukRequiredError(SecurityException):
""" Raised an operation failed because the SIM card's PUK is required (SIM locked) """
_description = "PUK required (SIM locked)"
def __init__(self, command, code=12):
super(PukRequiredError, self).__init__(command, code)
class CmsError(CommandError):
""" Message service failure result code: +CMS ERROR : <er>
Issued in response to an AT command
"""
def __new__(cls, *args, **kwargs):
# Return a specialized version of this class if possible
if len(args) >= 2:
code = args[1]
if code == 330:
return SmscNumberUnknownError(args[0])
return super(CmsError, cls).__new__(cls, *args, **kwargs)
def __init__(self, command, code):
super(CmsError, self).__init__(command, 'CMS', code)
class SmscNumberUnknownError(CmsError):
""" Raised if the SMSC (service centre) address is missing when trying to send an SMS message """
_description = 'SMSC number not set'
def __init__(self, command, code=330):
super(SmscNumberUnknownError, self).__init__(command, code)
class EncodingError(GsmModemException):
""" Raised if a decoding- or encoding operation failed """
|
lgpl-3.0
|
jasperges/blenderseed
|
__init__.py
|
1
|
2505
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2017 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
bl_info = {
"name": "appleseed",
"author": "The appleseedhq Organization",
"version": (0, 3, 7),
"blender": (2, 7, 1),
"location": "Info Header (Render Engine Menu)",
"description": "appleseed Render Engine",
"warning": "",
"wiki_url": "https://github.com/appleseedhq/blenderseed/wiki",
"tracker_url": "https://github.com/appleseedhq/blenderseed/issues",
"category": "Render"}
if "bpy" in locals():
import imp
imp.reload(properties)
imp.reload(operators)
imp.reload(export)
imp.reload(ui)
imp.reload(render)
imp.reload(util)
imp.reload(preferences)
imp.reload(project_file_writer)
else:
import bpy
from . import properties
from . import operators
from . import export
from . import ui
from . import render
from . import preferences
import bpy
def register():
properties.register()
operators.register()
export.register()
ui.register()
preferences.register()
bpy.utils.register_module(__name__)
def unregister():
properties.unregister()
operators.unregister()
export.unregister()
ui.unregister()
preferences.unregister()
bpy.utils.unregister_module(__name__)
|
mit
|
steingabelgaard/odoo-infrastructure
|
infrastructure_contract/database.py
|
1
|
2074
|
#-*- coding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.exceptions import Warning
from urlparse import urlparse
class database(models.Model):
_inherit = "infrastructure.database"
contract_id = fields.Many2one(
'account.analytic.account',
string='Contract',
domain="[('partner_id','child_of',partner_id),('state','=','open')]",
)
@api.one
def upload_contract_data(self):
client = self.get_client()
modules = ['web_support_client']
for module in modules:
if client.modules(name=module, installed=True) is None:
raise Warning(
_("You can not Upload a Contract if module '%s' is not\
installed in the database") % (module))
if not self.contract_id:
raise Warning(
_("You can not Upload a Contract if not contracted is linked"))
imp_fields = [
'id',
'name',
'user',
'database',
'server_host',
'number']
commercial_partner = self.contract_id.partner_id.commercial_partner_id
server_host = urlparse(
self.env['ir.config_parameter'].get_param('web.base.url')).netloc
# search for user related to commercial partner
user = self.env['res.users'].search([(
'partner_id', '=', commercial_partner.id)], limit=1)
if not user:
user = user.search([(
'partner_id', 'child_of', commercial_partner.id)])
if not user:
raise Warning(
_("You can not Upload a Contract if there is not user related\
to the contract Partner"))
rows = [[
'infrastructure_contract.contract_id_%i' % self.contract_id.id,
self.contract_id.name,
user.login,
self._cr.dbname,
server_host,
self.contract_id.code,
]]
client.model('support.contract').load(imp_fields, rows)
|
agpl-3.0
|
carthagecollege/django-djforms
|
djforms/polisci/model_united_nations/forms.py
|
2
|
4544
|
# -*- coding: utf-8 -*-
from django import forms
from djforms.polisci.model_united_nations.models import Attender
from djforms.polisci.model_united_nations.models import Country
from djforms.core.models import STATE_CHOICES, PAYMENT_CHOICES
from djforms.processors.models import Order
from djtools.fields.localflavor import USPhoneNumberField
from localflavor.us.forms import USZipCodeField
DELEGATIONS = (
('', '----'),
('1', '1'),
('2', '2'),
('3', '3'),
('4', '4'),
('5', '5'),
)
COUNTRIES = Country.objects.filter(status=True).order_by("name")
class AttenderForm(forms.ModelForm):
"""
A form to collect registration data for the Model United Nations
"""
first_name = forms.CharField(
max_length=128, label="Faculty advisor first name"
)
last_name = forms.CharField(
max_length=128
)
city = forms.CharField(
max_length=128,
required=True
)
address1 = forms.CharField(
max_length=128,
label = "Address",
required=True
)
state = forms.CharField(
widget=forms.Select(choices=STATE_CHOICES), required=True
)
postal_code = USZipCodeField(label="Zip Code")
phone = USPhoneNumberField(
help_text="Format: XXX-XXX-XXXX"
)
number_of_del = forms.TypedChoiceField(
choices=DELEGATIONS, label="Number of delegations"
)
comments = forms.CharField(
label="Questions/Comments",
help_text="""
Feel free to list alternate countries in the space above
(include your choice and delegation number)
""",
widget=forms.Textarea, required=False
)
class Meta:
model = Attender
exclude = (
'country','order','second_name','previous_name','salutation',
'delegation_1','delegation_2','delegation_3','delegation_4',
'delegation_5'
)
class CountryForm(forms.Form):
"""
Delegation countries
"""
delegation_1 = forms.ModelChoiceField(
queryset=COUNTRIES,
required=False
)
delegation_2 = forms.ModelChoiceField(
queryset=COUNTRIES,
required=False
)
delegation_3 = forms.ModelChoiceField(
queryset=COUNTRIES,
required=False
)
delegation_4 = forms.ModelChoiceField(
queryset=COUNTRIES,
required=False
)
delegation_5 = forms.ModelChoiceField(
queryset=COUNTRIES,
required=False
)
def clean(self):
"""
stackoverflow.com/questions/9835762/find-and-list-duplicates-in-python-list
"""
super(CountryForm, self).clean()
cd = self.cleaned_data
seen = set()
seen_add = seen.add
paises = [
cd.get("delegation_1"),cd.get("delegation_2"),
cd.get("delegation_3"),cd.get("delegation_4"),
cd.get("delegation_5")
]
# adds all elements it doesn't know yet to seen
# and all other to seen_twice
seen_twice = set( x for x in paises if x is not None and x in seen or seen_add(x) )
# turn the set into a list (as requested)
dupes = list( seen_twice )
if len(dupes) > 0:
raise forms.ValidationError(
"You have choosen the same country in more than one delegation."
)
clist = list(set(paises))
if len(clist) == 1 and clist[0] is None:
raise forms.ValidationError(
"You must assign a country to at least one delegation."
)
return self.cleaned_data
'''
# requires python 2.7
def clean(self):
from collections import Counter
mylist = [20, 30, 25, 20]
[k for k,v in Counter(mylist).items() if v>1]
# returns [20]
for k,v in Counter(mylist).items():
if v>1:
print k
# for python 2.6
l = [1,2,3,4,4,5,5,6,1]
list(set([x for x in l if l.count(x) > 1]))
'''
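# A Counter-based sketch equivalent to the seen/seen_add idiom in clean()
# above, for hashable non-None delegation values:
#
# from collections import Counter
# dupes = [c for c, n in Counter(p for p in paises if p is not None).items() if n > 1]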
class OrderForm(forms.ModelForm):
"""
Payment choices and total
"""
payment_method = forms.TypedChoiceField(
choices=PAYMENT_CHOICES,
widget=forms.RadioSelect()
)
total = forms.CharField(
max_length=7,
label="Registration Fee"
)
class Meta:
model = Order
fields = ('total',)
|
unlicense
|
psb-seclab/MAdKiller
|
pywebsocket-0.7.9/src/example/close_wsh.py
|
495
|
2835
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
from mod_pywebsocket import common
from mod_pywebsocket import stream
def web_socket_do_extra_handshake(request):
pass
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
code, reason = line.split(' ', 1)
if code is None or reason is None:
return
request.ws_stream.close_connection(int(code), reason)
# close_connection() initiates closing handshake. It validates code
# and reason. If you want to send a broken close frame for a test,
# following code will be useful.
# > data = struct.pack('!H', int(code)) + reason.encode('UTF-8')
# > request.connection.write(stream.create_close_frame(data))
# > # Suppress to re-respond client responding close frame.
# > raise Exception("customized server initiated closing handshake")
def web_socket_passive_closing_handshake(request):
# Simply echo a close status code
code, reason = request.ws_close_code, request.ws_close_reason
# pywebsocket sets pseudo code for receiving an empty body close frame.
if code == common.STATUS_NO_STATUS_RECEIVED:
code = None
reason = ''
return code, reason
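# Usage sketch: web_socket_transfer_data above expects each incoming text
# message to be "<code> <reason>", e.g. sending "1000 normal closure" makes
# the server initiate the closing handshake with status code 1000 and the
# reason "normal closure".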
# vi:sts=4 sw=4 et
|
gpl-2.0
|
Charlotte-Morgan/inasafe
|
safe/gui/tools/wizard/step_fc60_agglayer_from_browser.py
|
6
|
3715
|
# coding=utf-8
"""InaSAFE Wizard Step Aggregation Layer Browser."""
# noinspection PyPackageRequirements
from qgis.PyQt.QtGui import QPixmap
from safe import messaging as m
from safe.gui.tools.wizard.utilities import layers_intersect
from safe.gui.tools.wizard.wizard_step import get_wizard_step_ui_class
from safe.gui.tools.wizard.wizard_step_browser import WizardStepBrowser
from safe.utilities.i18n import tr
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
FORM_CLASS = get_wizard_step_ui_class(__file__)
class StepFcAggLayerFromBrowser(WizardStepBrowser, FORM_CLASS):
"""InaSAFE Wizard Step Aggregation Layer Browser."""
def __init__(self, parent=None):
"""Constructor for the tab.
:param parent: parent - widget to use as parent (Wizard Dialog).
:type parent: QWidget
"""
WizardStepBrowser.__init__(self, parent)
self.tvBrowserAggregation.setModel(self.proxy_model)
self.tvBrowserAggregation.selectionModel().selectionChanged.connect(
self.tvBrowserAggregation_selection_changed)
def is_ready_to_next_step(self):
"""Check if the step is complete.
If so, there is no reason to block the Next button.
:returns: True if new step may be enabled.
:rtype: bool
"""
return self.get_layer_description_from_browser('aggregation')[0]
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep instance or None
"""
if self.parent.is_selected_layer_keywordless:
# insert keyword creation thread here
self.parent.parent_step = self
self.parent.existing_keywords = None
self.parent.set_mode_label_to_keywords_creation()
new_step = self.parent.step_kw_purpose
else:
if layers_intersect(self.parent.exposure_layer,
self.parent.aggregation_layer):
new_step = self.parent.step_fc_summary
else:
new_step = self.parent.step_fc_agglayer_disjoint
return new_step
# noinspection PyPep8Naming
def tvBrowserAggregation_selection_changed(self):
"""Update layer description label."""
(is_compatible, desc) = self.get_layer_description_from_browser(
'aggregation')
self.lblDescribeBrowserAggLayer.setText(desc)
self.parent.pbnNext.setEnabled(is_compatible)
def set_widgets(self):
"""Set widgets on the Aggregation Layer From Browser tab."""
self.tvBrowserAggregation_selection_changed()
# Set icon
self.lblIconIFCWAggregationFromBrowser.setPixmap(QPixmap(None))
@property
def step_name(self):
"""Get the human friendly name for the wizard step.
:returns: The name of the wizard step.
:rtype: str
"""
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
return tr('Select Aggregation from Browser Step')
def help_content(self):
"""Return the content of help for this step wizard.
We only need to re-implement this method in each wizard step.
:returns: A message object contains help.
:rtype: m.Message
"""
message = m.Message()
message.add(m.Paragraph(tr(
'In this wizard step: {step_name}, you can choose an aggregation '
'layer from the list of layers on local disk or in a PostgreSQL '
'database.').format(step_name=self.step_name)))
return message
|
gpl-3.0
|
bhilburn/gnuradio
|
gr-qtgui/examples/pyqt_example_c.py
|
38
|
6375
|
#!/usr/bin/env python
#
# Copyright 2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
def __init__(self, display, control):
QtGui.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))
self.layout = QtGui.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtGui.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
self.freq1EditText)
self.amp1Edit = QtGui.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
self.amp1EditText)
# Control the second signal
self.freq2Edit = QtGui.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
self.freq2EditText)
self.amp2Edit = QtGui.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
self.amp2EditText)
self.quit = QtGui.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.connect(self.quit, QtCore.SIGNAL('clicked()'),
QtGui.qApp, QtCore.SLOT('quit()'))
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))
def attach_signal2(self, signal):
self.signal2 = signal
self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 1000
f2 = 2000
fftsize = 2048
self.qapp = QtGui.QApplication(sys.argv)
ss = open(gr.prefix() + '/share/gnuradio/themes/dark.qss')
sstext = ss.read()
ss.close()
self.qapp.setStyleSheet(sstext)
src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
src = blocks.add_cc()
channel = channels.channel_model(0.001)
thr = blocks.throttle(gr.sizeof_gr_complex, 100*fftsize)
self.snk1 = qtgui.sink_c(fftsize, filter.firdes.WIN_BLACKMAN_hARRIS,
0, Rs,
"Complex Signal Example",
True, True, True, False)
self.connect(src1, (src,0))
self.connect(src2, (src,1))
self.connect(src, channel, thr, self.snk1)
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.attach_signal2(src2)
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt4.QtGui.QWidget
pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
tb = my_top_block()
tb.start()
tb.qapp.exec_()
tb.stop()
|
gpl-3.0
|
kingvuplus/enigma2
|
lib/python/Plugins/SystemPlugins/SoftwareManager/BackupRestore.py
|
2
|
23685
|
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.Console import Console
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Pixmap import Pixmap
from Tools.LoadPixmap import LoadPixmap
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Button import Button
from Components.config import getConfigListEntry, configfile, ConfigSelection, ConfigSubsection, ConfigText, ConfigLocations
from Components.config import config
from Components.ConfigList import ConfigList,ConfigListScreen
from Components.FileList import MultiFileSelectList
from Components.Network import iNetwork
from Plugins.Plugin import PluginDescriptor
from enigma import eTimer, eEnv, eConsoleAppContainer, eEPGCache
from Tools.Directories import *
from os import popen, path, makedirs, listdir, access, stat, rename, remove, W_OK, R_OK
from time import gmtime, strftime, localtime
from datetime import datetime
from boxbranding import getBoxType, getMachineBrand, getMachineName, getImageVersion, getImageDistro
boxtype = getBoxType()
imagename = getImageDistro()
imageversion = getImageVersion()
config.plugins.configurationbackup = ConfigSubsection()
config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname', eEnv.resolve("${datadir}/enigma2/keymap.usr"), '/etc/CCcam.cfg', '/usr/keys/'])
def getBackupPath():
backuppath = config.plugins.configurationbackup.backuplocation.value
if backuppath.endswith('/'):
return backuppath + 'backup_' + imagename + '_' + imageversion + '_' + boxtype
else:
return backuppath + '/backup_' + imagename + '_' + imageversion + '_' + boxtype
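# Example result (distro/version/boxtype are illustrative values): with the
# default location '/media/hdd/' this returns something like
# '/media/hdd/backup_openpli_4.0_et9x00'.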
def getOldBackupPath():
backuppath = config.plugins.configurationbackup.backuplocation.value
if backuppath.endswith('/'):
return backuppath + 'backup'
else:
return backuppath + '/backup'
def getBackupFilename():
return "enigma2settingsbackup.tar.gz"
def SettingsEntry(name, checked):
if checked:
picture = LoadPixmap(cached = True, path = resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_on.png"));
else:
picture = LoadPixmap(cached = True, path = resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/icons/lock_off.png"));
return (name, picture, checked)
class BackupScreen(Screen, ConfigListScreen):
skin = """
<screen position="135,144" size="350,310" title="Backup is running" >
<widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, runBackup = False):
Screen.__init__(self, session)
self.session = session
self.runBackup = runBackup
self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
{
"ok": self.close,
"back": self.close,
"cancel": self.close,
}, -1)
self.finished_cb = None
self.backuppath = getBackupPath()
self.backupfile = getBackupFilename()
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.list = []
ConfigListScreen.__init__(self, self.list)
self.onLayoutFinish.append(self.layoutFinished)
if self.runBackup:
self.onShown.append(self.doBackup)
def layoutFinished(self):
self.setWindowTitle()
def setWindowTitle(self):
self.setTitle(_("Backup is running..."))
def doBackup(self):
configfile.save()
if config.plugins.softwaremanager.epgcache.value:
eEPGCache.getInstance().save()
try:
if path.exists(self.backuppath) == False:
makedirs(self.backuppath)
self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.value )
if not "/tmp/installed-list.txt" in self.backupdirs:
self.backupdirs = self.backupdirs + " /tmp/installed-list.txt"
if not "/tmp/changed-configfiles.txt" in self.backupdirs:
self.backupdirs = self.backupdirs + " /tmp/changed-configfiles.txt"
cmd1 = "opkg list-installed | egrep 'enigma2-plugin-|task-base|packagegroup-base' > /tmp/installed-list.txt"
cmd2 = "opkg list-changed-conffiles > /tmp/changed-configfiles.txt"
cmd3 = "tar -czvf " + self.fullbackupfilename + " " + self.backupdirs
cmd = [cmd1, cmd2, cmd3]
if path.exists(self.fullbackupfilename):
dt = datetime.fromtimestamp(stat(self.fullbackupfilename).st_ctime).strftime('%Y-%m-%d-%H%M%S')
self.newfilename = self.backuppath + "/" + dt + '-' + self.backupfile
if path.exists(self.newfilename):
remove(self.newfilename)
rename(self.fullbackupfilename,self.newfilename)
if self.finished_cb:
self.session.openWithCallback(self.finished_cb, Console, title = _("Backup is running..."), cmdlist = cmd,finishedCallback = self.backupFinishedCB,closeOnSuccess = True)
else:
self.session.open(Console, title = _("Backup is running..."), cmdlist = cmd,finishedCallback = self.backupFinishedCB, closeOnSuccess = True)
except OSError:
if self.finished_cb:
self.session.openWithCallback(self.finished_cb, MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10 )
else:
self.session.openWithCallback(self.backupErrorCB,MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10 )
def backupFinishedCB(self,retval = None):
self.close(True)
def backupErrorCB(self,retval = None):
self.close(False)
def runAsync(self, finished_cb):
self.finished_cb = finished_cb
self.doBackup()
class BackupSelection(Screen):
skin = """
<screen name="BackupSelection" position="center,center" size="560,400" title="Select files/folders to backup">
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="checkList" position="5,50" size="550,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["key_yellow"] = StaticText()
self["summary_description"] = StaticText("")
self.selectedFiles = config.plugins.configurationbackup.backupdirs.value
defaultDir = '/'
inhibitDirs = ["/bin", "/boot", "/dev", "/autofs", "/lib", "/proc", "/sbin", "/sys", "/hdd", "/tmp", "/mnt", "/media"]
self.filelist = MultiFileSelectList(self.selectedFiles, defaultDir, inhibitDirs = inhibitDirs )
self["checkList"] = self.filelist
self["actions"] = ActionMap(["DirectionActions", "OkCancelActions", "ShortcutActions"],
{
"cancel": self.exit,
"red": self.exit,
"yellow": self.changeSelectionState,
"green": self.saveSelection,
"ok": self.okClicked,
"left": self.left,
"right": self.right,
"down": self.down,
"up": self.up
}, -1)
if not self.selectionChanged in self["checkList"].onSelectionChanged:
self["checkList"].onSelectionChanged.append(self.selectionChanged)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
idx = 0
self["checkList"].moveToIndex(idx)
self.setWindowTitle()
self.selectionChanged()
def setWindowTitle(self):
self.setTitle(_("Select files/folders to backup"))
def selectionChanged(self):
current = self["checkList"].getCurrent()[0]
self["summary_description"].text = current[3]
if current[2] is True:
self["key_yellow"].setText(_("Deselect"))
else:
self["key_yellow"].setText(_("Select"))
def up(self):
self["checkList"].up()
def down(self):
self["checkList"].down()
def left(self):
self["checkList"].pageUp()
def right(self):
self["checkList"].pageDown()
def changeSelectionState(self):
self["checkList"].changeSelectionState()
self.selectedFiles = self["checkList"].getSelectedList()
def saveSelection(self):
self.selectedFiles = self["checkList"].getSelectedList()
config.plugins.configurationbackup.backupdirs.setValue(self.selectedFiles)
config.plugins.configurationbackup.backupdirs.save()
config.plugins.configurationbackup.save()
config.save()
self.close(None)
def exit(self):
self.close(None)
def okClicked(self):
if self.filelist.canDescent():
self.filelist.descent()
class RestoreMenu(Screen):
skin = """
<screen name="RestoreMenu" position="center,center" size="560,400" title="Restore backups" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget name="filelist" position="5,50" size="550,230" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, plugin_path):
Screen.__init__(self, session)
self.skin_path = plugin_path
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Restore"))
self["key_yellow"] = StaticText(_("Delete"))
self["summary_description"] = StaticText("")
self.sel = []
self.val = []
self.entry = False
self.exe = False
self.path = ""
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.KeyOk,
"cancel": self.keyCancel,
"up": self.keyUp,
"down": self.keyDown
}, -1)
self["shortcuts"] = ActionMap(["ShortcutActions"],
{
"red": self.keyCancel,
"green": self.KeyOk,
"yellow": self.deleteFile,
})
self.flist = []
self["filelist"] = MenuList(self.flist)
self.fill_list()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.setWindowTitle()
self.checkSummary()
def setWindowTitle(self):
self.setTitle(_("Restore backups"))
def fill_list(self):
self.flist = []
self.path = getBackupPath()
if not path.isdir(self.path):
self.path = getOldBackupPath()
if not path.isdir(self.path):
makedirs(self.path)
for file in listdir(self.path):
if file.endswith(".tar.gz"):
self.flist.append(file)
self.entry = True
self.flist.sort(reverse=True)
self["filelist"].l.setList(self.flist)
def KeyOk(self):
if (self.exe == False) and (self.entry == True):
self.sel = self["filelist"].getCurrent()
if self.sel:
self.val = self.path + "/" + self.sel
self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore\nthe following backup:\n%s\nYour receiver will restart after the backup has been restored!") % self.sel)
def keyCancel(self):
self.close()
def keyUp(self):
self["filelist"].up()
self.checkSummary()
def keyDown(self):
self["filelist"].down()
self.checkSummary()
def startRestore(self, ret = False):
if ret == True:
self.session.open(RestoreScreen, runRestore = True, selectedBackupFile = self.path + "/" + self.sel)
def deleteFile(self):
if (self.exe == False) and (self.entry == True):
self.sel = self["filelist"].getCurrent()
if self.sel:
self.val = self.path + "/" + self.sel
self.session.openWithCallback(self.startDelete, MessageBox, _("Are you sure you want to delete\nthe following backup:\n") + self.sel)
def startDelete(self, ret = False):
if ret == True:
self.exe = True
print "removing:",self.val
if path.exists(self.val) == True:
remove(self.val)
self.exe = False
self.fill_list()
def checkSummary(self):
cur = self["filelist"].getCurrent()
self["summary_description"].text = cur
class RestoreScreen(Screen, ConfigListScreen):
skin = """
<screen position="135,144" size="350,310" title="Restore is running..." >
<widget name="config" position="10,10" size="330,250" transparent="1" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, runRestore = False, selectedBackupFile = None):
Screen.__init__(self, session)
self.session = session
self.runRestore = runRestore
self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
{
"ok": self.close,
"back": self.close,
"cancel": self.close,
}, -1)
self.backuppath = getBackupPath()
if not path.isdir(self.backuppath):
self.backuppath = getOldBackupPath()
self.backupfile = getBackupFilename()
if selectedBackupFile:
self.fullbackupfilename = selectedBackupFile
else:
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
self.list = []
ConfigListScreen.__init__(self, self.list)
self.onLayoutFinish.append(self.layoutFinished)
if self.runRestore:
self.onShown.append(self.doRestore)
def layoutFinished(self):
self.setWindowTitle()
def setWindowTitle(self):
self.setTitle(_("Restoring..."))
def doRestore(self):
if path.exists("/proc/stb/vmpeg/0/dst_width"):
restorecmdlist = ["tar -xzvf " + self.fullbackupfilename + " -C /", "echo 0 > /proc/stb/vmpeg/0/dst_height", "echo 0 > /proc/stb/vmpeg/0/dst_left", "echo 0 > /proc/stb/vmpeg/0/dst_top", "echo 0 > /proc/stb/vmpeg/0/dst_width", "/etc/init.d/autofs restart"]
else:
restorecmdlist = ["tar -xzvf " + self.fullbackupfilename + " -C /", "/etc/init.d/autofs restart"]
print"[SOFTWARE MANAGER] Restore Settings !!!!"
self.session.open(Console, title = _("Restoring..."), cmdlist = restorecmdlist, finishedCallback = self.restoreFinishedCB)
def restoreFinishedCB(self,retval = None):
self.session.openWithCallback(self.checkPlugins, RestartNetwork)
def checkPlugins(self):
if path.exists("/tmp/installed-list.txt"):
if os.path.exists("/media/hdd/images/config/noplugins") and config.misc.firstrun.value:
self.userRestoreScript()
else:
self.session.openWithCallback(self.userRestoreScript, installedPlugins)
else:
self.userRestoreScript()
def userRestoreScript(self, ret = None):
SH_List = []
SH_List.append('/media/hdd/images/config/myrestore.sh')
SH_List.append('/media/usb/images/config/myrestore.sh')
SH_List.append('/media/cf/images/config/myrestore.sh')
startSH = None
for SH in SH_List:
if path.exists(SH):
startSH = SH
break
if startSH:
self.session.openWithCallback(self.restartGUI, Console, title = _("Running Myrestore script, Please wait ..."), cmdlist = [startSH], closeOnSuccess = True)
else:
self.restartGUI()
def restartGUI(self, ret = None):
self.session.open(Console, title = _("Your %s %s will Reboot...")% (getMachineBrand(), getMachineName()), cmdlist = ["killall -9 enigma2"])
def runAsync(self, finished_cb):
self.doRestore()
class RestartNetwork(Screen):
def __init__(self, session):
Screen.__init__(self, session)
skin = """
<screen name="RestartNetwork" position="center,center" size="600,100" title="Restart Network Adapter">
<widget name="label" position="10,30" size="500,50" halign="center" font="Regular;20" transparent="1" foregroundColor="white" />
</screen> """
self.skin = skin
self["label"] = Label(_("Please wait while your network is restarting..."))
self["summary_description"] = StaticText(_("Please wait while your network is restarting..."))
self.onShown.append(self.setWindowTitle)
self.onLayoutFinish.append(self.restartLan)
def setWindowTitle(self):
self.setTitle(_("Restart Network Adapter"))
def restartLan(self):
print"[SOFTWARE MANAGER] Restart Network"
iNetwork.restartNetwork(self.restartLanDataAvail)
def restartLanDataAvail(self, data):
if data is True:
iNetwork.getInterfaces(self.getInterfacesDataAvail)
def getInterfacesDataAvail(self, data):
self.close()
class installedPlugins(Screen):
UPDATE = 0
LIST = 1
skin = """
<screen position="center,center" size="600,100" title="Install Plugins" >
<widget name="label" position="10,30" size="500,50" halign="center" font="Regular;20" transparent="1" foregroundColor="white" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Install Plugins"))
self["label"] = Label(_("Please wait while we check your installed plugins..."))
self["summary_description"] = StaticText(_("Please wait while we check your installed plugins..."))
self.type = self.UPDATE
self.container = eConsoleAppContainer()
self.container.appClosed.append(self.runFinished)
self.container.dataAvail.append(self.dataAvail)
self.remainingdata = ""
self.pluginsInstalled = []
self.doUpdate()
def doUpdate(self):
print"[SOFTWARE MANAGER] update package list"
self.container.execute("opkg update")
def doList(self):
print"[SOFTWARE MANAGER] read installed package list"
self.container.execute("opkg list-installed | egrep 'enigma2-plugin-|task-base|packagegroup-base'")
def dataAvail(self, strData):
if self.type == self.LIST:
strData = self.remainingdata + strData
lines = strData.split('\n')
if len(lines[-1]):
self.remainingdata = lines[-1]
lines = lines[0:-1]
else:
self.remainingdata = ""
for x in lines:
self.pluginsInstalled.append(x[:x.find(' - ')])
def runFinished(self, retval):
if self.type == self.UPDATE:
self.type = self.LIST
self.doList()
elif self.type == self.LIST:
self.readPluginList()
def readPluginList(self):
self.PluginList = []
f = open("/tmp/installed-list.txt", "r")
lines = f.readlines()
for x in lines:
self.PluginList.append(x[:x.find(' - ')])
f.close()
self.createMenuList()
def createMenuList(self):
self.Menulist = []
for x in self.PluginList:
if x not in self.pluginsInstalled:
self.Menulist.append(SettingsEntry(x, True))
if len(self.Menulist) == 0:
self.close()
else:
if os.path.exists("/media/hdd/images/config/plugins") and config.misc.firstrun.value:
self.startInstall(True)
else:
self.session.openWithCallback(self.startInstall, MessageBox, _("Backup plugins found\ndo you want to install now?"))
def startInstall(self, ret = None):
if ret:
self.session.openWithCallback(self.restoreCB, RestorePlugins, self.Menulist)
else:
self.close()
def restoreCB(self, ret = None):
self.close()
class RestorePlugins(Screen):
def __init__(self, session, menulist):
Screen.__init__(self, session)
skin = """
<screen name="RestorePlugins" position="center,center" size="650,500" title="Restore Plugins">
<widget source="menu" render="Listbox" position="12,12" size="627,416" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (50,7), size = (590,60), flags = RT_HALIGN_LEFT, text = 0),
MultiContentEntryPixmapAlphaBlend(pos = (10,7), size = (50,40), png = 1),
],
"fonts": [gFont("Regular",22)],
"itemHeight":40
}
</convert>
</widget>
<ePixmap pixmap="skin_default/buttons/red.png" position="162,448" size="138,40" alphatest="blend" />
<ePixmap pixmap="skin_default/buttons/green.png" position="321,448" size="138,40" alphatest="blend" />
<widget name="key_red" position="169,455" size="124,26" zPosition="1" font="Regular;17" halign="center" transparent="1" />
<widget name="key_green" position="329,455" size="124,26" zPosition="1" font="Regular;17" halign="center" transparent="1" />
</screen>"""
self.skin = skin
Screen.setTitle(self, _("Restore Plugins"))
self.index = 0
self.list = menulist
for r in menulist:
print "[SOFTWARE MANAGER] Plugin to restore: %s" % r[0]
self.container = eConsoleAppContainer()
self["menu"] = List(list())
self["menu"].onSelectionChanged.append(self.selectionChanged)
self["key_green"] = Button(_("Install"))
self["key_red"] = Button(_("Cancel"))
self["summary_description"] = StaticText("")
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"red": self.exit,
"green": self.green,
"cancel": self.exit,
"ok": self.ok
}, -2)
self["menu"].setList(menulist)
self["menu"].setIndex(self.index)
self.selectionChanged()
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("Restore Plugins"))
if os.path.exists("/media/hdd/images/config/plugins") and config.misc.firstrun.value:
self.green()
def exit(self):
self.close()
def green(self):
pluginlist = []
self.myipklist = []
for x in self.list:
if x[2]:
myipk = self.SearchIPK(x[0])
if myipk:
self.myipklist.append(myipk)
else:
pluginlist.append(x[0])
if len(pluginlist) > 0:
if len(self.myipklist) > 0:
self.session.open(Console, title = _("Installing plugins..."), cmdlist = ['opkg --force-overwrite install ' + ' '.join(pluginlist)], finishedCallback = self.installLocalIPK, closeOnSuccess = True)
else:
self.session.open(Console, title = _("Installing plugins..."), cmdlist = ['opkg --force-overwrite install ' + ' '.join(pluginlist)], finishedCallback = self.exit, closeOnSuccess = True)
elif len(self.myipklist) > 0:
self.installLocalIPK()
def installLocalIPK(self):
self.session.open(Console, title = _("Installing plugins..."), cmdlist = ['opkg --force-overwrite install ' + ' '.join(self.myipklist)], finishedCallback = self.exit, closeOnSuccess = True)
def ok(self):
index = self["menu"].getIndex()
item = self["menu"].getCurrent()[0]
state = self["menu"].getCurrent()[2]
if state:
self.list[index] = SettingsEntry(item, False)
else:
self.list[index] = SettingsEntry(item, True)
self["menu"].setList(self.list)
self["menu"].setIndex(index)
def selectionChanged(self):
index = self["menu"].getIndex()
if index is None:
index = 0
else:
self["summary_description"].text = self["menu"].getCurrent()[0]
self.index = index
def drawList(self):
self["menu"].setList(self.Menulist)
self["menu"].setIndex(self.index)
def exitNoPlugin(self, ret):
self.close()
def SearchIPK(self, ipkname):
ipkname = ipkname + "*"
search_dirs = [ "/media/hdd", "/media/usb" ]
sdirs = " ".join(search_dirs)
cmd = 'find %s -name "%s" | grep -iv "./open-multiboot/*" | head -n 1' % (sdirs, ipkname)
res = popen(cmd).read()
if res == "":
return None
else:
return res.replace("\n", "")
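# Illustrative note (hypothetical plugin name, not in the original source):
# for "enigma2-plugin-extensions-foo", SearchIPK runs roughly
#   find /media/hdd /media/usb -name "enigma2-plugin-extensions-foo*" | grep -iv "./open-multiboot/*" | head -n 1
# and returns the first matching package path, or None when nothing matches.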
|
gpl-2.0
|
zakki/openhsp
|
hsp3ll/llvm/utils/lit/lit/main.py
|
2
|
18194
|
#!/usr/bin/env python
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
from __future__ import absolute_import
import math, os, platform, random, re, sys, time
import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery
class TestingProgressDisplay(object):
def __init__(self, opts, numTests, progressBar=None):
self.opts = opts
self.numTests = numTests
self.current = None
self.progressBar = progressBar
self.completed = 0
def finish(self):
if self.progressBar:
self.progressBar.clear()
elif self.opts.quiet:
pass
elif self.opts.succinct:
sys.stdout.write('\n')
def update(self, test):
self.completed += 1
if self.opts.incremental:
update_incremental_cache(test)
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
shouldShow = test.result.code.isFailure or \
(self.opts.show_unsupported and test.result.code.name == 'UNSUPPORTED') or \
(not self.opts.quiet and not self.opts.succinct)
if not shouldShow:
return
if self.progressBar:
self.progressBar.clear()
# Show the test result line.
test_name = test.getFullName()
print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.numTests))
# Show the test failure output, if requested.
if test.result.code.isFailure and self.opts.showOutput:
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
print(test.result.output)
print("*" * 20)
# Report test metrics, if present.
if test.result.metrics:
print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
'*'*10))
items = sorted(test.result.metrics.items())
for metric_name, value in items:
print('%s: %s ' % (metric_name, value.format()))
print("*" * 10)
# Ensure the output is flushed.
sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
try:
import json
except ImportError:
lit_config.fatal('test output unsupported with Python 2.5')
# Construct the data we will write.
data = {}
# Encode the current lit version as a schema version.
data['__version__'] = lit.__versioninfo__
data['elapsed'] = testing_time
# FIXME: Record some information on the lit configuration used?
# FIXME: Record information from the individual test suites?
# Encode the tests.
data['tests'] = tests_data = []
for test in run.tests:
test_data = {
'name' : test.getFullName(),
'code' : test.result.code.name,
'output' : test.result.output,
'elapsed' : test.result.elapsed }
# Add test metrics, if present.
if test.result.metrics:
test_data['metrics'] = metrics_data = {}
for key, value in test.result.metrics.items():
metrics_data[key] = value.todata()
tests_data.append(test_data)
# Write the output.
f = open(output_path, 'w')
try:
json.dump(data, f, indent=2, sort_keys=True)
f.write('\n')
finally:
f.close()
def update_incremental_cache(test):
if not test.result.code.isFailure:
return
fname = test.getFilePath()
os.utime(fname, None)
def sort_by_incremental_cache(run):
def sortIndex(test):
fname = test.getFilePath()
try:
return -os.path.getmtime(fname)
except OSError:
return 0
run.tests.sort(key=sortIndex)
def main(builtinParameters = {}):
# Use processes by default on Unix platforms.
isWindows = platform.system() == 'Windows'
useProcessesIsDefault = not isWindows
global options
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options] {file-or-path}")
parser.add_option("", "--version", dest="show_version",
help="Show version and exit",
action="store_true", default=False)
parser.add_option("-j", "--threads", dest="numThreads", metavar="N",
help="Number of testing threads",
type=int, action="store", default=None)
parser.add_option("", "--config-prefix", dest="configPrefix",
metavar="NAME", help="Prefix for 'lit' config files",
action="store", default=None)
parser.add_option("", "--param", dest="userParameters",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
type=str, action="append", default=[])
group = OptionGroup(parser, "Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
group.add_option("-q", "--quiet", dest="quiet",
help="Suppress no error output",
action="store_true", default=False)
group.add_option("-s", "--succinct", dest="succinct",
help="Reduce amount of output",
action="store_true", default=False)
group.add_option("-v", "--verbose", dest="showOutput",
help="Show all test output",
action="store_true", default=False)
group.add_option("-o", "--output", dest="output_path",
help="Write test results to the provided path",
action="store", type=str, metavar="PATH")
group.add_option("", "--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
group.add_option("", "--show-unsupported", dest="show_unsupported",
help="Show unsupported tests",
action="store_true", default=False)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Execution")
group.add_option("", "--path", dest="path",
help="Additional paths to add to testing environment",
action="append", type=str, default=[])
group.add_option("", "--vg", dest="useValgrind",
help="Run tests under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-leak", dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true", default=False)
group.add_option("", "--vg-arg", dest="valgrindArgs", metavar="ARG",
help="Specify an extra argument for valgrind",
type=str, action="append", default=[])
group.add_option("", "--time-tests", dest="timeTests",
help="Track elapsed wall time for each test",
action="store_true", default=False)
group.add_option("", "--no-execute", dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true", default=False)
group.add_option("", "--xunit-xml-output", dest="xunit_output_file",
help=("Write XUnit-compatible XML test reports to the"
" specified file"), default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Test Selection")
group.add_option("", "--max-tests", dest="maxTests", metavar="N",
help="Maximum number of tests to run",
action="store", type=int, default=None)
group.add_option("", "--max-time", dest="maxTime", metavar="N",
help="Maximum time to spend testing (in seconds)",
action="store", type=float, default=None)
group.add_option("", "--shuffle", dest="shuffle",
help="Run tests in random order",
action="store_true", default=False)
group.add_option("-i", "--incremental", dest="incremental",
help="Run modified and failing tests first (updates "
"mtimes)",
action="store_true", default=False)
group.add_option("", "--filter", dest="filter", metavar="REGEX",
help=("Only run tests with paths matching the given "
"regular expression"),
action="store", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Debug and Experimental Options")
group.add_option("", "--debug", dest="debug",
help="Enable debugging (for 'lit' development)",
action="store_true", default=False)
group.add_option("", "--show-suites", dest="showSuites",
help="Show discovered test suites",
action="store_true", default=False)
group.add_option("", "--show-tests", dest="showTests",
help="Show all discovered tests",
action="store_true", default=False)
group.add_option("", "--use-processes", dest="useProcesses",
help="Run tests in parallel with processes (not threads)",
action="store_true", default=useProcessesIsDefault)
group.add_option("", "--use-threads", dest="useProcesses",
help="Run tests in parallel with threads (not processes)",
action="store_false", default=useProcessesIsDefault)
parser.add_option_group(group)
(opts, args) = parser.parse_args()
if opts.show_version:
print("lit %s" % (lit.__version__,))
return
if not args:
parser.error('No inputs specified')
if opts.numThreads is None:
# Python <2.5 has a race condition causing lit to always fail with numThreads>1
# http://bugs.python.org/issue1731717
# I haven't seen this bug occur with 2.5.2 and later, so only enable multiple
# threads by default there.
if sys.hexversion >= 0x2050200:
opts.numThreads = lit.util.detectCPUs()
else:
opts.numThreads = 1
inputs = args
# Create the user defined parameters.
userParams = dict(builtinParameters)
for entry in opts.userParameters:
if '=' not in entry:
name, val = entry, ''
else:
name, val = entry.split('=', 1)
userParams[name] = val
# Create the global config object.
litConfig = lit.LitConfig.LitConfig(
progname = os.path.basename(sys.argv[0]),
path = opts.path,
quiet = opts.quiet,
useValgrind = opts.useValgrind,
valgrindLeakCheck = opts.valgrindLeakCheck,
valgrindArgs = opts.valgrindArgs,
noExecute = opts.noExecute,
debug = opts.debug,
isWindows = isWindows,
params = userParams,
config_prefix = opts.configPrefix)
# Perform test discovery.
run = lit.run.Run(litConfig,
lit.discovery.find_tests_for_inputs(litConfig, inputs))
if opts.showSuites or opts.showTests:
# Aggregate the tests by suite.
suitesAndTests = {}
for result_test in run.tests:
if result_test.suite not in suitesAndTests:
suitesAndTests[result_test.suite] = []
suitesAndTests[result_test.suite].append(result_test)
suitesAndTests = list(suitesAndTests.items())
suitesAndTests.sort(key = lambda item: item[0].name)
# Show the suites, if requested.
if opts.showSuites:
print('-- Test Suites --')
for ts,ts_tests in suitesAndTests:
print(' %s - %d tests' %(ts.name, len(ts_tests)))
print(' Source Root: %s' % ts.source_root)
print(' Exec Root : %s' % ts.exec_root)
# Show the tests, if requested.
if opts.showTests:
print('-- Available Tests --')
for ts,ts_tests in suitesAndTests:
ts_tests.sort(key = lambda test: test.path_in_suite)
for test in ts_tests:
print(' %s' % (test.getFullName(),))
# Exit.
sys.exit(0)
# Select and order the tests.
numTotalTests = len(run.tests)
# First, select based on the filter expression if given.
if opts.filter:
try:
rex = re.compile(opts.filter)
except re.error:
parser.error("invalid regular expression for --filter: %r" % (
opts.filter))
run.tests = [result_test for result_test in run.tests
if rex.search(result_test.getFullName())]
# Then select the order.
if opts.shuffle:
random.shuffle(run.tests)
elif opts.incremental:
sort_by_incremental_cache(run)
else:
run.tests.sort(key = lambda result_test: result_test.getFullName())
# Finally limit the number of tests, if desired.
if opts.maxTests is not None:
run.tests = run.tests[:opts.maxTests]
# Don't create more threads than tests.
opts.numThreads = min(len(run.tests), opts.numThreads)
extra = ''
if len(run.tests) != numTotalTests:
extra = ' of %d' % numTotalTests
header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
opts.numThreads)
progressBar = None
if not opts.quiet:
if opts.succinct and opts.useProgressBar:
try:
tc = lit.ProgressBar.TerminalController()
progressBar = lit.ProgressBar.ProgressBar(tc, header)
except ValueError:
print(header)
progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
else:
print(header)
startTime = time.time()
display = TestingProgressDisplay(opts, len(run.tests), progressBar)
try:
run.execute_tests(display, opts.numThreads, opts.maxTime,
opts.useProcesses)
except KeyboardInterrupt:
sys.exit(2)
display.finish()
testing_time = time.time() - startTime
if not opts.quiet:
print('Testing Time: %.2fs' % (testing_time,))
# Write out the test data, if requested.
if opts.output_path is not None:
write_test_results(run, litConfig, testing_time, opts.output_path)
# List test results organized by kind.
hasFailures = False
byCode = {}
for test in run.tests:
if test.result.code not in byCode:
byCode[test.result.code] = []
byCode[test.result.code].append(test)
if test.result.code.isFailure:
hasFailures = True
# Print each test in any of the failing groups.
for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
('Failing Tests', lit.Test.FAIL),
('Unresolved Tests', lit.Test.UNRESOLVED)):
elts = byCode.get(code)
if not elts:
continue
print('*'*20)
print('%s (%d):' % (title, len(elts)))
for test in elts:
print(' %s' % test.getFullName())
sys.stdout.write('\n')
if opts.timeTests and run.tests:
# Order by time.
test_times = [(test.getFullName(), test.result.elapsed)
for test in run.tests]
lit.util.printHistogram(test_times, title='Tests')
for name,code in (('Expected Passes ', lit.Test.PASS),
('Expected Failures ', lit.Test.XFAIL),
('Unsupported Tests ', lit.Test.UNSUPPORTED),
('Unresolved Tests ', lit.Test.UNRESOLVED),
('Unexpected Passes ', lit.Test.XPASS),
('Unexpected Failures', lit.Test.FAIL),):
if opts.quiet and not code.isFailure:
continue
N = len(byCode.get(code,[]))
if N:
print(' %s: %d' % (name,N))
if opts.xunit_output_file:
# Collect the tests, indexed by test suite
by_suite = {}
for result_test in run.tests:
suite = result_test.suite.config.name
if suite not in by_suite:
by_suite[suite] = {
'passes' : 0,
'failures' : 0,
'tests' : [] }
by_suite[suite]['tests'].append(result_test)
if result_test.result.code.isFailure:
by_suite[suite]['failures'] += 1
else:
by_suite[suite]['passes'] += 1
xunit_output_file = open(opts.xunit_output_file, "w")
xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
xunit_output_file.write("<testsuites>\n")
for suite_name, suite in by_suite.items():
xunit_output_file.write("<testsuite name='" + suite_name + "'")
xunit_output_file.write(" tests='" + str(suite['passes'] +
suite['failures']) + "'")
xunit_output_file.write(" failures='" + str(suite['failures']) +
"'>\n")
for result_test in suite['tests']:
xunit_output_file.write(result_test.getJUnitXML() + "\n")
xunit_output_file.write("</testsuite>\n")
xunit_output_file.write("</testsuites>")
xunit_output_file.close()
# If we encountered any additional errors, exit abnormally.
if litConfig.numErrors:
sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
sys.exit(2)
# Warn about warnings.
if litConfig.numWarnings:
sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
if hasFailures:
sys.exit(1)
sys.exit(0)
if __name__=='__main__':
main()
|
bsd-3-clause
|
bmedx/modulestore
|
xmodule/contentstore/content.py
|
17
|
19479
|
import re
import uuid
from xmodule.assetstore.assetmgr import AssetManager
STATIC_CONTENT_VERSION = 1
XASSET_LOCATION_TAG = 'c4x'
XASSET_SRCREF_PREFIX = 'xasset:'
XASSET_THUMBNAIL_TAIL_NAME = '.jpg'
STREAM_DATA_CHUNK_SIZE = 1024
VERSIONED_ASSETS_PREFIX = '/assets/courseware'
VERSIONED_ASSETS_PATTERN = r'/assets/courseware/(v[\d]/)?([a-f0-9]{32})'
import os
import logging
import StringIO
from urlparse import urlparse, urlunparse, parse_qsl
from urllib import urlencode, quote_plus
from opaque_keys.edx.locator import AssetLocator
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys import InvalidKeyError
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.exceptions import NotFoundError
from PIL import Image
class StaticContent(object):
def __init__(self, loc, name, content_type, data, last_modified_at=None, thumbnail_location=None, import_path=None,
length=None, locked=False, content_digest=None):
self.location = loc
self.name = name # an editable display string, and therefore not part of the location, which must stay fixed
self.content_type = content_type
self._data = data
self.length = length
self.last_modified_at = last_modified_at
self.thumbnail_location = thumbnail_location
# optional information about where this file was imported from. This is needed to support import/export
# cycles
self.import_path = import_path
self.locked = locked
self.content_digest = content_digest
@property
def is_thumbnail(self):
return self.location.category == 'thumbnail'
@staticmethod
def generate_thumbnail_name(original_name, dimensions=None, extension=None):
"""
- original_name: Name of the asset (typically its location.name)
- dimensions: `None` or a tuple of (width, height) in pixels
- extension: `None` or desired filename extension of the thumbnail
"""
if extension is None:
extension = XASSET_THUMBNAIL_TAIL_NAME
name_root, ext = os.path.splitext(original_name)
if ext != extension:
name_root = name_root + ext.replace(u'.', u'-')
if dimensions:
width, height = dimensions # pylint: disable=unpacking-non-sequence
name_root += "-{}x{}".format(width, height)
return u"{name_root}{extension}".format(
name_root=name_root,
extension=extension,
)
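# Illustrative examples (not part of the original source), using the default
# XASSET_THUMBNAIL_TAIL_NAME of '.jpg':
#   generate_thumbnail_name('photo.png')                        -> u'photo-png.jpg'
#   generate_thumbnail_name('photo.png', dimensions=(128, 128)) -> u'photo-png-128x128.jpg'
#   generate_thumbnail_name('vector.svg', extension='.svg')     -> u'vector.svg'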
@staticmethod
def compute_location(course_key, path, revision=None, is_thumbnail=False):
"""
Constructs a location object for static content.
- course_key: the course that this asset belongs to
- path: is the name of the static asset
- revision: is the object's revision information
- is_thumbnail: is whether or not we want the thumbnail version of this
asset
"""
path = path.replace('/', '_')
return course_key.make_asset_key(
'asset' if not is_thumbnail else 'thumbnail',
AssetLocator.clean_keeping_underscores(path)
).for_branch(None)
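# Illustrative example (hypothetical course key, not in the original source):
# compute_location(course_key, 'a/b.png') first rewrites the path to 'a_b.png',
# then returns an 'asset' key for the cleaned name on the null branch;
# is_thumbnail=True uses the 'thumbnail' type instead.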
def get_id(self):
return self.location
@property
def data(self):
return self._data
ASSET_URL_RE = re.compile(r"""
/?c4x/
(?P<org>[^/]+)/
(?P<course>[^/]+)/
(?P<category>[^/]+)/
(?P<name>[^/]+)
""", re.VERBOSE | re.IGNORECASE)
@staticmethod
def is_c4x_path(path_string):
"""
Returns True if the path is believed to be a c4x link, based on its leading element.
"""
return StaticContent.ASSET_URL_RE.match(path_string) is not None
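# Illustrative checks (hypothetical paths, not in the original source):
#   StaticContent.is_c4x_path('/c4x/OrgX/Course1/asset/logo.png') -> True
#   StaticContent.is_c4x_path('/static/logo.png')                 -> False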
@staticmethod
def get_static_path_from_location(location):
"""
This utility static method takes a location identifier and creates a 'durable' /static/... URL representation of it.
This link is 'durable' because it maintains its integrity when courseware is cloned across course ids, e.g. for
reruns of courses.
In the LMS/CMS we have runtime link-rewriting, so at render time this /static/... form gets translated into the
actual /c4x/... path which the client needs to reference static content.
"""
if location is not None:
return u"/static/{name}".format(name=location.name)
else:
return None
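# Illustrative example (hypothetical location, not in the original source):
# a location whose .name is 'handouts/syllabus.pdf' maps to
# u'/static/handouts/syllabus.pdf'.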
@staticmethod
def get_base_url_path_for_course_assets(course_key):
if course_key is None:
return None
assert isinstance(course_key, CourseKey)
placeholder_id = uuid.uuid4().hex
# Create a dummy asset location with a fake but unique name, then strip the name off and return the base path.
url_path = StaticContent.serialize_asset_key_with_slash(
course_key.make_asset_key('asset', placeholder_id).for_branch(None)
)
return url_path.replace(placeholder_id, '')
@staticmethod
def get_location_from_path(path):
"""
Generate an AssetKey for the given path (old c4x/org/course/asset/name syntax)
"""
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# TODO - re-address this once LMS-11198 is tackled.
if path.startswith('/'):
# try stripping off the leading slash and try again
return AssetKey.from_string(path[1:])
@staticmethod
def is_versioned_asset_path(path):
"""Determines whether the given asset path is versioned."""
return path.startswith(VERSIONED_ASSETS_PREFIX)
@staticmethod
def parse_versioned_asset_path(path):
"""
Examines an asset path and breaks it apart if it is versioned,
returning both the asset digest and the unversioned asset path,
which will normally be an AssetKey.
"""
asset_digest = None
asset_path = path
if StaticContent.is_versioned_asset_path(asset_path):
result = re.match(VERSIONED_ASSETS_PATTERN, asset_path)
if result is not None:
asset_digest = result.groups()[1]
asset_path = re.sub(VERSIONED_ASSETS_PATTERN, '', asset_path)
return (asset_digest, asset_path)
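# Illustrative round trip (hypothetical 32-char hex digest, not in the
# original source):
#   parse_versioned_asset_path('/assets/courseware/v1/<digest>/c4x/Org/Course/asset/f.png')
#   -> ('<digest>', '/c4x/Org/Course/asset/f.png')
# Unversioned paths come back unchanged with a digest of None.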
@staticmethod
def add_version_to_asset_path(path, version):
"""
Adds a prefix to an asset path indicating the asset's version.
"""
# Don't version an already-versioned path.
if StaticContent.is_versioned_asset_path(path):
return path
structure_version = 'v{}'.format(STATIC_CONTENT_VERSION)
return u'{}/{}/{}{}'.format(VERSIONED_ASSETS_PREFIX, structure_version, version, path)
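# Illustrative example (hypothetical digest, not in the original source), with
# STATIC_CONTENT_VERSION = 1:
#   add_version_to_asset_path('/c4x/Org/Course/asset/f.png', '<digest>')
#   -> u'/assets/courseware/v1/<digest>/c4x/Org/Course/asset/f.png'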
@staticmethod
def get_asset_key_from_path(course_key, path):
"""
Parses a path, extracting an asset key or creating one.
Args:
course_key: key to the course which owns this asset
path: the path to said content
Returns:
AssetKey: the asset key that represents the path
"""
# Clean up the path, removing any static prefix and any leading slash.
if path.startswith('/static/'):
path = path[len('/static/'):]
# Old-style asset keys start with `/`, so don't try and strip it
# in that case.
if not path.startswith('/c4x'):
path = path.lstrip('/')
try:
return AssetKey.from_string(path)
except InvalidKeyError:
# If we couldn't parse the path, just let compute_location figure it out.
# It's most likely a path like /image.png or something.
return StaticContent.compute_location(course_key, path)
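# Illustrative behaviour (hypothetical values, not in the original source):
# '/static/images/logo.png' is stripped to 'images/logo.png'; if that fails to
# parse as an AssetKey, compute_location(course_key, 'images/logo.png') builds
# one instead.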
@staticmethod
def is_excluded_asset_type(path, excluded_exts):
"""
Check if this is an allowed file extension to serve.
Some files aren't served through the CDN in order to avoid same-origin policy/CORS-related issues.
"""
return any(path.lower().endswith(excluded_ext.lower()) for excluded_ext in excluded_exts)
@staticmethod
def get_canonicalized_asset_path(course_key, path, base_url, excluded_exts, encode=True):
"""
Returns a fully-qualified path to a piece of static content.
If a static asset CDN is configured, this path will include it.
Otherwise, the path will simply be relative.
Args:
course_key: key to the course which owns this asset
path: the path to said content
Returns:
string: fully-qualified path to asset
"""
# Break down the input path.
_, _, relative_path, params, query_string, _ = urlparse(path)
# Convert our path to an asset key if it isn't one already.
asset_key = StaticContent.get_asset_key_from_path(course_key, relative_path)
# Check the status of the asset to see if this can be served via CDN aka publicly.
serve_from_cdn = False
content_digest = None
try:
content = AssetManager.find(asset_key, as_stream=True)
serve_from_cdn = not getattr(content, "locked", True)
content_digest = getattr(content, "content_digest", None)
except (ItemNotFoundError, NotFoundError):
# If we can't find the item, just treat it as if it's locked.
serve_from_cdn = False
# Do a generic check to see if anything about this asset disqualifies it from being CDN'd.
is_excluded = False
if StaticContent.is_excluded_asset_type(relative_path, excluded_exts):
serve_from_cdn = False
is_excluded = True
# Update any query parameter values that have asset paths in them. This is for assets that
# require their own after-the-fact values, like a Flash file that needs the path of a config
# file passed to it e.g. /static/visualization.swf?configFile=/static/visualization.xml
query_params = parse_qsl(query_string)
updated_query_params = []
for query_name, query_val in query_params:
if query_val.startswith("/static/"):
new_val = StaticContent.get_canonicalized_asset_path(
course_key, query_val, base_url, excluded_exts, encode=False)
updated_query_params.append((query_name, new_val))
else:
# Make sure we're encoding Unicode strings down to their byte string
# representation so that `urlencode` can handle it.
updated_query_params.append((query_name, query_val.encode('utf-8')))
serialized_asset_key = StaticContent.serialize_asset_key_with_slash(asset_key)
base_url = base_url if serve_from_cdn else ''
asset_path = serialized_asset_key
# If the content has a digest (i.e. md5sum) value specified, create a versioned path to the asset using it.
if not is_excluded and content_digest:
asset_path = StaticContent.add_version_to_asset_path(serialized_asset_key, content_digest)
# Only encode this if told to. Important so that we don't double encode
# when working with paths that are in query parameters.
asset_path = asset_path.encode('utf-8')
if encode:
asset_path = quote_plus(asset_path, '/:+@')
return urlunparse((None, base_url.encode('utf-8'), asset_path, params, urlencode(updated_query_params), None))
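# Illustrative sketch (hypothetical values, not in the original source): an
# unlocked, non-excluded asset with a content digest and a CDN base_url of
# '//cdn.example.com' resolves roughly to
#   '//cdn.example.com/assets/courseware/v1/<digest>/c4x/Org/Course/asset/f.png'
# while a locked or excluded asset keeps the plain relative /c4x/... path.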
def stream_data(self):
yield self._data
@staticmethod
def serialize_asset_key_with_slash(asset_key):
"""
Legacy code expects the serialized asset key to start with a slash, so ensure that in one place.
:param asset_key:
"""
url = unicode(asset_key)
if not url.startswith('/'):
url = '/' + url # TODO - re-address this once LMS-11198 is tackled.
return url
class StaticContentStream(StaticContent):
def __init__(self, loc, name, content_type, stream, last_modified_at=None, thumbnail_location=None, import_path=None,
length=None, locked=False, content_digest=None):
super(StaticContentStream, self).__init__(loc, name, content_type, None, last_modified_at=last_modified_at,
thumbnail_location=thumbnail_location, import_path=import_path,
length=length, locked=locked, content_digest=content_digest)
self._stream = stream
def stream_data(self):
while True:
chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
if len(chunk) == 0:
break
yield chunk
def stream_data_in_range(self, first_byte, last_byte):
"""
Stream the data between first_byte and last_byte (both inclusive)
"""
self._stream.seek(first_byte)
position = first_byte
while True:
if last_byte < position + STREAM_DATA_CHUNK_SIZE - 1:
chunk = self._stream.read(last_byte - position + 1)
yield chunk
break
chunk = self._stream.read(STREAM_DATA_CHUNK_SIZE)
position += STREAM_DATA_CHUNK_SIZE
yield chunk
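# Illustrative sketch (not in the original source): with STREAM_DATA_CHUNK_SIZE
# of 1024, stream_data_in_range(0, 1500) yields one 1024-byte chunk followed by
# a final 477-byte chunk covering bytes 1024..1500 inclusive.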
def close(self):
self._stream.close()
def copy_to_in_mem(self):
self._stream.seek(0)
content = StaticContent(self.location, self.name, self.content_type, self._stream.read(),
last_modified_at=self.last_modified_at, thumbnail_location=self.thumbnail_location,
import_path=self.import_path, length=self.length, locked=self.locked,
content_digest=self.content_digest)
return content
class ContentStore(object):
'''
Abstraction for all ContentStore providers (e.g. MongoDB)
'''
def save(self, content):
raise NotImplementedError
def find(self, filename):
raise NotImplementedError
def get_all_content_for_course(self, course_key, start=0, maxresults=-1, sort=None, filter_params=None):
'''
Returns a list of static assets for a course, followed by the total number of assets.
By default all assets are returned, but start and maxresults can be provided to limit the query.
The return format is a list of asset data dictionaries.
The asset data dictionaries have the following keys:
asset_key (:class:`opaque_keys.edx.AssetKey`): The key of the asset
displayname: The human-readable name of the asset
uploadDate (datetime.datetime): The date and time that the file was uploaded
contentType: The mimetype string of the asset
md5: An md5 hash of the asset content
'''
raise NotImplementedError
def delete_all_course_assets(self, course_key):
"""
Delete all of the assets which use this course_key as an identifier
:param course_key:
"""
raise NotImplementedError
def copy_all_course_assets(self, source_course_key, dest_course_key):
"""
Copy all the course assets from source_course_key to dest_course_key
"""
raise NotImplementedError
def generate_thumbnail(self, content, tempfile_path=None, dimensions=None):
"""Create a thumbnail for a given image.
Returns a tuple of (StaticContent, AssetKey)
`content` is the StaticContent representing the image you want to make a
thumbnail out of.
`tempfile_path` is a string path to the location of a file to read from
in order to grab the image data, instead of relying on `content.data`
`dimensions` is an optional param that represents (width, height) in
pixels. It defaults to None.
"""
thumbnail_content = None
is_svg = content.content_type == 'image/svg+xml'
# use a naming convention to associate originals with the thumbnail
thumbnail_name = StaticContent.generate_thumbnail_name(
content.location.name, dimensions=dimensions, extension='.svg' if is_svg else None
)
thumbnail_file_location = StaticContent.compute_location(
content.location.course_key, thumbnail_name, is_thumbnail=True
)
# if we're uploading an image, then let's generate a thumbnail so that we can
# serve it up when needed without having to rescale on the fly
try:
if is_svg:
# for svg simply store the provided svg file, since vector graphics should be good enough
# for downscaling client-side
if tempfile_path is None:
thumbnail_file = StringIO.StringIO(content.data)
else:
with open(tempfile_path) as f:
thumbnail_file = StringIO.StringIO(f.read())
thumbnail_content = StaticContent(thumbnail_file_location, thumbnail_name,
'image/svg+xml', thumbnail_file)
self.save(thumbnail_content)
elif content.content_type is not None and content.content_type.split('/')[0] == 'image':
# use PIL to do the thumbnail generation (http://www.pythonware.com/products/pil/)
# My understanding is that PIL will maintain aspect ratios while restricting
# the max-height/width to be whatever you pass in as 'size'
# @todo: move the thumbnail size to a configuration setting?!?
if tempfile_path is None:
source = StringIO.StringIO(content.data)
else:
source = tempfile_path
# We use the context manager here to avoid leaking the inner file descriptor
# of the Image object -- this way it gets closed after we're done with using it.
thumbnail_file = StringIO.StringIO()
with Image.open(source) as image:
# I've seen some exceptions from the PIL library when trying to save paletted
# PNG files to JPEG. Per the google-universe, they suggest converting to RGB first.
thumbnail_image = image.convert('RGB')
if not dimensions:
dimensions = (128, 128)
thumbnail_image.thumbnail(dimensions, Image.ANTIALIAS)
thumbnail_image.save(thumbnail_file, 'JPEG')
thumbnail_file.seek(0)
# store this thumbnail as any other piece of content
thumbnail_content = StaticContent(thumbnail_file_location, thumbnail_name,
'image/jpeg', thumbnail_file)
self.save(thumbnail_content)
except Exception, exc: # pylint: disable=broad-except
# log and continue as thumbnails are generally considered as optional
logging.exception(
u"Failed to generate thumbnail for {0}. Exception: {1}".format(content.location, str(exc))
)
return thumbnail_content, thumbnail_file_location
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
"""
pass
|
apache-2.0
|
Ell/goonauth
|
goonauth/settings.py
|
1
|
5913
|
import os
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = [
("Your Name", "[email protected]"),
]
MANAGERS = ADMINS
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "dev.db",
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = int(os.environ.get("SITE_ID", 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory static files should be collected to.
# Don"t put anything in this directory yourself; store your static files
# in apps" "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(PACKAGE_ROOT, "static"),
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = "e(!nulfj5r7zi8g!05=7v&)^q-7imzq3x*c#a4lxbztjp@5t_u"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"account.context_processors.account",
"pinax_theme_bootstrap.context_processors.theme",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
ROOT_URLCONF = "goonauth.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "goonauth.wsgi.application"
TEMPLATE_DIRS = [
os.path.join(PACKAGE_ROOT, "templates"),
]
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
# theme
"bootstrapform",
"pinax_theme_bootstrap",
# external
"account",
"eventlog",
"metron",
"rest_framework",
"oauth2_provider",
"south",
"braces",
"sslserver",
# project
"goonauth",
"profiles",
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler"
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
}
}
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_USE_OPENID = False
ACCOUNT_REQUIRED_EMAIL = True
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = True
ACCOUNT_LOGIN_REDIRECT_URL = "home"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
LOGIN_URL = '/account/login/'
OAUTH2_PROVIDER = {
# this is the list of available scopes
'SCOPES': {
'read': 'Read scope',
'write': 'Write scope',
}
}
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
AUTHENTICATION_BACKENDS = [
"account.auth_backends.UsernameAuthenticationBackend",
]
try:
from local_settings import *
except ImportError:
pass
|
mit
|
75651/kbengine_cloud
|
kbe/src/lib/python/Lib/logging/config.py
|
83
|
35727
|
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import errno
import io
import logging
import logging.handlers
import re
import struct
import sys
import traceback
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
RESET_ERROR = errno.ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
if isinstance(fname, configparser.RawConfigParser):
cp = fname
else:
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
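# Illustrative minimal config file for fileConfig() (not part of this module),
# in the ConfigParser format consumed above:
#
#   [loggers]
#   keys = root
#
#   [handlers]
#   keys = console
#
#   [formatters]
#   keys = simple
#
#   [logger_root]
#   level = INFO
#   handlers = console
#
#   [handler_console]
#   class = StreamHandler
#   level = INFO
#   formatter = simple
#   args = (sys.stderr,)
#
#   [formatter_simple]
#   format = %(levelname)s:%(name)s:%(message)s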
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
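# Illustrative example (not in the original source):
#   _resolve('logging.handlers.RotatingFileHandler')
# imports 'logging', then walks (importing as needed) down to the
# RotatingFileHandler class object and returns it.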
def _strip_spaces(alist):
return [x.strip() for x in alist]
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(level)
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(level)
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(level)
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
class ConvertingMixin(object):
"""For ConvertingXXX's, this mixin class provides common functions"""
def convert_with_key(self, key, value, replace=True):
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
if replace:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def convert(self, value):
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict, ConvertingMixin):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
return self.convert_with_key(key, value)
def get(self, key, default=None):
value = dict.get(self, key, default)
return self.convert_with_key(key, value)
def pop(self, key, default=None):
value = dict.pop(self, key, default)
return self.convert_with_key(key, value, replace=False)
class ConvertingList(list, ConvertingMixin):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
return self.convert_with_key(key, value)
def pop(self, idx=-1):
value = list.pop(self, idx)
return self.convert(value)
class ConvertingTuple(tuple, ConvertingMixin):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
# Can't replace a tuple entry.
return self.convert_with_key(key, value, replace=False)
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
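# Illustrative example (hypothetical config, not in the original source): with
# self.config == {'handlers': {'console': {'level': 'DEBUG'}}},
#   cfg_convert('handlers.console.level') -> 'DEBUG'
# i.e. the suffix of a 'cfg://' URL is resolved with dotted and [indexed]
# access into the configuration dictionary.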
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
props = config.pop('.', None)
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
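# Illustrative config fragment (handler names here are arbitrary examples)
# showing the deferred-target mechanism: 'buffered' sorts before its target
# 'console', so its first pass fails with 'target not configured yet' and
# configure() retries it once 'console' is a real Handler.
#
#   'handlers': {
#       'buffered': {'class': 'logging.handlers.MemoryHandler',
#                    'capacity': 100, 'target': 'console'},
#       'console': {'class': 'logging.StreamHandler'},
#   }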
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
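# Minimal usage sketch for dictConfig (illustrative; the formatter and
# handler names are arbitrary examples, not defined by this module):
#
#   dictConfig({
#       'version': 1,
#       'formatters': {'brief': {'format': '%(levelname)s:%(message)s'}},
#       'handlers': {'console': {'class': 'logging.StreamHandler',
#                                'formatter': 'brief', 'level': 'INFO'}},
#       'root': {'handlers': ['console'], 'level': 'INFO'},
#   })
#
# 'version' is required and must be 1; configure() raises ValueError
# otherwise, before taking the logging lock.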
def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
Use the ``verify`` argument to verify any bytes received across the wire
from a client. If specified, it should be a callable which receives a
single argument - the bytes of configuration data received across the
network - and it should return either ``None``, to indicate that the
passed in bytes could not be verified and should be discarded, or a
byte string which is then passed to the configuration machinery as
normal. Note that you can return transformed bytes, e.g. by decrypting
the bytes passed in.
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
if self.server.verify is not None:
chunk = self.server.verify(chunk)
if chunk is not None: # verified, can process
chunk = chunk.decode("utf-8")
try:
import json
d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except Exception:
#Apply new configuration.
file = io.StringIO(chunk)
try:
fileConfig(file)
except Exception:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except OSError as e:
if e.errno != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None, verify=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
self.verify = verify
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port, verify):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.verify = verify
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready,
verify=self.verify)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify)
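# Client-side sketch of the wire protocol documented in handle() above
# (illustrative; the host and file name are assumptions):
#
#   import socket, struct
#   payload = open('logconf.ini', 'rb').read()
#   s = socket.create_connection(('localhost', DEFAULT_LOGGING_CONFIG_PORT))
#   s.sendall(struct.pack('>L', len(payload)))  # 4-byte big-endian length
#   s.sendall(payload)                          # then the config itself
#   s.close()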
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
|
lgpl-3.0
|
SaranyaKarthikeyan/boto
|
tests/integration/iam/test_cert_verification.py
|
126
|
1542
|
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.iam
class IAMCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
iam = True
regions = boto.iam.regions()
def sample_service_call(self, conn):
conn.get_all_users()
|
mit
|
mkraemer67/pylearn2
|
pylearn2/linear/tests/test_matrixmul.py
|
49
|
3201
|
from pylearn2.linear.matrixmul import MatrixMul, make_local_rfs
from pylearn2.datasets.dense_design_matrix import DefaultViewConverter
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
import theano
from theano import tensor
import numpy as np
def test_matrixmul():
"""
Tests matrix multiplication for a range of different
dtypes. Checks both normal and transpose multiplication
using randomly generated matrices.
"""
rng = np.random.RandomState(222)
dtypes = [
'int16', 'int32', 'int64', 'float64', 'float32'
]
tensor_x = [
tensor.wmatrix(),
tensor.imatrix(),
tensor.lmatrix(),
tensor.dmatrix(),
tensor.fmatrix()
]
np_W, np_x, np_x_T = [], [], []
for dtype in dtypes:
if 'int' in dtype:
np_W.append(rng.randint(
-10, 10, rng.random_integers(5, size=2)
).astype(dtype))
np_x.append(rng.randint(
-10, 10, (rng.random_integers(5),
np_W[-1].shape[0])
).astype(dtype))
np_x_T.append(rng.randint(
-10, 10, (rng.random_integers(5),
np_W[-1].shape[1])
).astype(dtype))
elif 'float' in dtype:
np_W.append(rng.uniform(
-1, 1, rng.random_integers(5, size=2)
).astype(dtype))
np_x.append(rng.uniform(
-10, 10, (rng.random_integers(5),
np_W[-1].shape[0])
).astype(dtype))
np_x_T.append(rng.uniform(
-10, 10, (rng.random_integers(5),
np_W[-1].shape[1])
).astype(dtype))
else:
assert False
def sharedW(value, dtype):
return theano.shared(theano._asarray(value, dtype=dtype))
tensor_W = [sharedW(W, dtype) for W in np_W]
matrixmul = [MatrixMul(W) for W in tensor_W]
assert all(mm.get_params()[0] == W for mm, W in zip(matrixmul, tensor_W))
fn = [theano.function([x], mm.lmul(x))
for x, mm in zip(tensor_x, matrixmul)]
fn_T = [theano.function([x], mm.lmul_T(x))
for x, mm in zip(tensor_x, matrixmul)]
for W, x, x_T, f, f_T in zip(np_W, np_x, np_x_T, fn, fn_T):
np.testing.assert_allclose(f(x), np.dot(x, W))
np.testing.assert_allclose(f_T(x_T), np.dot(x_T, W.T))
def test_make_local_rfs():
view_converter = DefaultViewConverter((10, 10, 3))
test_dataset = DenseDesignMatrix(np.ones((10, 300)),
view_converter=view_converter)
matrixmul = make_local_rfs(test_dataset, 4, (5, 5), (5, 5),
draw_patches=True)
W = matrixmul.get_params()[0].get_value()
assert W.shape == (300, 4)
np.testing.assert_allclose(W.sum(axis=0), 75 * np.ones(4))
np.testing.assert_allclose(W.sum(axis=1), np.ones(300))
matrixmul = make_local_rfs(test_dataset, 4, (5, 5), (5, 5))
W = matrixmul.get_params()[0].get_value()
assert W.shape == (300, 4)
np.testing.assert_raises(ValueError, make_local_rfs,
test_dataset, 2, (5, 5), (5, 5))
|
bsd-3-clause
|
andybalaam/cell
|
tests/library_tests.py
|
1
|
6619
|
from io import StringIO
from tests.util.asserts import assert_that, assert_fails, equals
from tests.util.test import test
from pycell.env import Env
from pycell.eval_ import eval_list
from pycell.lexer import lex
from pycell.parser import parse
import pycell.library
# --- Utils
def evald(inp, stdout=None):
env = Env(stdout=stdout)
pycell.library.import_(env)
return eval_list(parse(lex(inp)), env)
# --- Tests
@test
def if_calls_then_if_first_argument_is_nonzero():
assert_that(
evald('if( 1, {"t";}, {"f";} );'),
equals(evald('"t";'))
)
@test
def if_calls_else_if_first_argument_is_zero():
assert_that(
evald('if( 0, {"t";}, {"f";} );'),
equals(evald('"f";'))
)
@test
def Call_if_with_a_nonnumber_is_an_error():
assert_fails(
"Only numbers may be passed to an if, but I was passed "
+ "'('string', 'x')'",
evald,
'if("x", {}, {});'
)
@test
def Equals_returns_true_for_identical_numbers():
assert_that(
evald('if(equals(1, 1), {4;}, {5;});'),
equals(evald("4;"))
)
@test
def Equals_returns_false_for_different_numbers():
assert_that(
evald('if(equals(1, 2), {4;}, {5;});'),
equals(evald("5;"))
)
@test
def Equals_returns_true_for_identical_strings():
assert_that(
evald('if(equals("foo", "foo"), {4;}, {5;});'),
equals(evald("4;"))
)
@test
def Equals_returns_false_for_different_strings():
assert_that(
evald('if(equals("foo", "bar"), {4;}, {5;});'),
equals(evald("5;"))
)
@test
def Equals_returns_false_for_different_types():
assert_that(
evald('if(equals(1, "1"), {4;}, {5;});'),
equals(evald("5;"))
)
@test
def Functions_are_not_equal_even_if_the_same():
assert_that(
evald('if(equals({3;}, {3;}), {4;}, {5;});'),
equals(evald("5;"))
)
@test
def Different_functions_are_not_equal():
assert_that(
evald('if(equals({:(x)3;}, {3;}), {4;}, {5;});'),
equals(evald("5;"))
)
assert_that(
evald('if(equals({3;}, {2; 3;}), {4;}, {5;});'),
equals(evald("5;"))
)
@test
def Print_prints_to_stdout():
stdout = StringIO()
evald('print("foo");', stdout=stdout)
assert_that(stdout.getvalue(), equals("foo\n"))
@test
def Print_returns_None():
stdout = StringIO()
assert_that(evald('print("foo");', stdout=stdout), equals(("none",)))
@test
def Set_changes_value_of_symbol():
assert_that(evald('x = 3; set("x", 4); x;'), equals(evald('4;')))
@test
def Set_changes_value_of_symbol_in_outer_scope():
assert_that(evald(
"""
foo = "bar";
fn = {
set("foo", "baz");
};
fn();
foo;
"""),
equals(evald('"baz";'))
)
@test
def Calling_set_with_nonstring_is_an_error():
assert_fails(
"set() takes a string as its first argument, but was: "
+ "('number', 3.0)",
evald,
"x = 3; set(x, 4);"
)
@test
def Can_make_a_pair_and_access_the_first_element():
assert_that(evald(
"""
p = pair("foo", 4);
first(p);
"""
),
equals(evald("'foo';"))
)
@test
def Can_make_a_pair_and_access_the_second_element():
assert_that(evald(
"""
p = pair("foo", 4);
second(p);
"""
),
equals(evald("4;"))
)
@test
def List0_is_None():
assert_that(evald("list0();"), equals(evald("None;")))
@test
def Can_append_item_to_an_empty_list():
assert_that(evald("first( append(list0(), 3));"), equals(evald("3;")))
assert_that(evald("second(append(list0(), 3));"), equals(evald("None;")))
@test
def Can_append_item_to_a_nonempty_list():
assert_that(
evald("first(append(list2(1, 2), 3));"),
equals(evald("1;"))
)
assert_that(
evald("first(second(append(list2(1, 2), 3)));"),
equals(evald("2;"))
)
assert_that(
evald("first(second(second(append(list2(1, 2), 3))));"),
equals(evald("3;"))
)
assert_that(
evald("second(second(second(append(list2(1, 2), 3))));"),
equals(evald("None;"))
)
@test
def Len_gives_the_length_of_a_string():
assert_that(evald("len('');"), equals(evald("0;")))
assert_that(evald("len('abc');"), equals(evald("3;")))
@test
def Char_at_gives_the_nth_character_of_a_string():
assert_that(evald("char_at(0, 'abc');"), equals(evald("'a';")))
assert_that(evald("char_at(1, 'abc');"), equals(evald("'b';")))
assert_that(evald("char_at(2, 'abc');"), equals(evald("'c';")))
assert_that(evald("char_at(3, 'abc');"), equals(evald("None;")))
assert_that(evald("char_at(0, '' );"), equals(evald("None;")))
assert_that(evald("char_at(1, '' );"), equals(evald("None;")))
@test
def Not_negates():
assert_that(evald("not(0);"), equals(evald("1;")))
assert_that(evald("not(1);"), equals(evald("0;")))
assert_that(evald("not(2);"), equals(evald("0;")))
@test
def For_loops_through_everything_in_a_list():
stdout = StringIO()
evald(
"""
for(list3("a", "b", "c"),
{:(ch)
print(ch);
});
""",
stdout=stdout
)
assert_that(stdout.getvalue(), equals("a\nb\nc\n"))
@test
def For_loops_through_nothing_when_given_empty_list():
stdout = StringIO()
evald(
"""
for(list0(),
{:(ch)
print(ch);
});
""",
stdout=stdout
)
assert_that(stdout.getvalue(), equals(""))
@test
def Chars_in_allows_iterating_over_the_characters_of_a_string():
stdout = StringIO()
evald(
"""
for(chars_in("abc"),
{:(ch)
print(ch);
});
""",
stdout=stdout
)
assert_that(stdout.getvalue(), equals("a\nb\nc\n"))
@test
def Chars_in_deals_well_with_empty_string():
stdout = StringIO()
evald(
"""
for(chars_in(""),
{:(ch)
print(ch);
});
""",
stdout=stdout
)
assert_that(stdout.getvalue(), equals(""))
@test
def Concat_two_empty_strings_gives_empty_string():
assert_that(evald("concat('', '');"), equals(evald("'';")))
@test
def Concat_a_string_with_an_empty_makes_the_string():
assert_that(evald("concat('abc', '');"), equals(evald("'abc';")))
assert_that(evald("concat('', 'abc');"), equals(evald("'abc';")))
@test
def Concat_two_strings_sticks_them_together():
assert_that(evald("concat('foo', 'bar');"), equals(evald("'foobar';")))
|
mit
|
kunesj/conkyKeep
|
conkyKeep/build_conkyrc.py
|
1
|
1624
|
#!/usr/bin/env python3
# encoding: utf-8
from .configmanager import CONFIG_MANAGER
CONKYRC_TOP = """
-- This file gets automatically generated by build_conkyrc.py
conky.config = {
"""
CONKYRC_BOTTOM = """
total_run_times = 0,
alignment = 'top_right',
background = true,
own_window = true,
own_window_type = 'desktop',
own_window_transparent = true, --false, -- Set to true for full transparency
own_window_hints = 'undecorated,sticky,skip_taskbar,skip_pager,below',
out_to_console = false,
out_to_stderr = false,
override_utf8_locale = true,
double_buffer = true,
draw_shades = false,
draw_outline = false,
draw_borders = false,
stippled_borders = 0,
default_color = 'white',
no_buffers = true,
uppercase = false,
use_spacer = 'left',
-- important, standard size for var buffer is 128bit
imlib_cache_size = 0,
text_buffer_size = 6000
}
"""
def build_conkyrc(path):
rc = CONKYRC_TOP +\
" update_interval = %i,\n" % CONFIG_MANAGER.getInt("General", "ConkyUpdate") +\
" gap_x = %i,\n" % CONFIG_MANAGER.getInt("General", "ConkyGapX") +\
" gap_y = %i,\n" % CONFIG_MANAGER.getInt("General", "ConkyGapY") +\
" minimum_height = %i,\n" % CONFIG_MANAGER.getInt("General", "ConkyHeight") +\
" minimum_width = %i,\n" % CONFIG_MANAGER.getInt("General", "ConkyWidth") +\
CONKYRC_BOTTOM +\
"conky.text = [[ ${execpi %i python3 -m conkyKeep -n } ]]" % CONFIG_MANAGER.getInt("General", "ConkyKeepUpdate") +\
"\n"
with open(path, 'w') as f:
f.write(rc)
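# Illustrative sketch of the generated output (values are hypothetical,
# assuming ConkyUpdate=10, ConkyGapX=20, ConkyGapY=40, ConkyKeepUpdate=60):
#
#   conky.config = {
#   update_interval = 10,
#   gap_x = 20,
#   gap_y = 40,
#   ...settings from CONKYRC_BOTTOM...
#   }
#   conky.text = [[ ${execpi 60 python3 -m conkyKeep -n } ]]
#
# The result is a plain conky 1.10 Lua config, usable as `conky -c <path>`.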
|
gpl-3.0
|
timothydmorton/bokeh
|
bokeh/deprecate.py
|
43
|
18856
|
# This deprecation library was adapted from Twisted. Full copyright
# statement retained at the bottom of this file
"""
Deprecation framework for Twisted.
To mark a method or function as being deprecated do this::
from twisted.python.versions import Version
from twisted.python.deprecate import deprecated
@deprecated(Version("Twisted", 8, 0, 0))
def badAPI(self, first, second):
'''
Docstring for badAPI.
'''
...
The newly-decorated badAPI will issue a warning when called. It will also have
a deprecation notice appended to its docstring.
To mark module-level attributes as being deprecated you can use::
badAttribute = "someValue"
...
deprecatedModuleAttribute(
Version("Twisted", 8, 0, 0),
"Use goodAttribute instead.",
"your.full.module.name",
"badAttribute")
The deprecated attributes will issue a warning whenever they are accessed. If
the attributes being deprecated are in the same module as the
L{deprecatedModuleAttribute} call is being made from, the C{__name__} global
can be used as the C{moduleName} parameter.
See also L{Version}.
@type DEPRECATION_WARNING_FORMAT: C{str}
@var DEPRECATION_WARNING_FORMAT: The default deprecation warning string format
to use when one is not provided by the user.
"""
from __future__ import absolute_import
__all__ = [
'deprecated_module',
'deprecated',
'getDeprecationWarningString',
'getWarningMethod',
'setWarningMethod',
'deprecatedModuleAttribute',
]
import types, sys, inspect
from warnings import warn, warn_explicit
from dis import findlinestarts
def mergeFunctionMetadata(f, g):
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
To use this function safely you must use the return value. In Python 2.3,
L{mergeFunctionMetadata} will create a new function. In later versions of
Python, C{g} will be mutated and returned.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
try:
merged = types.FunctionType(
g.func_code, g.func_globals,
f.__name__, inspect.getargspec(g)[-1],
g.func_closure)
except TypeError:
pass
else:
merged = g
try:
merged.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
merged.__dict__.update(g.__dict__)
merged.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
merged.__module__ = f.__module__
return merged
DEPRECATION_WARNING_FORMAT = '%(fqpn)s was deprecated in %(version)s'
# Notionally, part of twisted.python.reflect, but defining it there causes a
# cyclic dependency between this module and that module. Define it here,
# instead, and let reflect import it to re-expose to the public.
def _fullyQualifiedName(obj):
"""
Return the fully qualified name of a module, class, method or function.
Classes and functions need to be module level ones to be correctly
qualified.
@rtype: C{str}.
"""
name = obj.__name__
if inspect.isclass(obj) or inspect.isfunction(obj):
moduleName = obj.__module__
return "%s.%s" % (moduleName, name)
elif inspect.ismethod(obj):
className = _fullyQualifiedName(obj.im_class)
return "%s.%s" % (className, name)
return name
# Try to keep it looking like something in twisted.python.reflect.
_fullyQualifiedName.__module__ = 'twisted.python.reflect'
_fullyQualifiedName.__name__ = 'fullyQualifiedName'
def getWarningMethod():
"""
Return the warning method currently used to record deprecation warnings.
"""
return warn
def setWarningMethod(newMethod):
"""
Set the warning method to use to record deprecation warnings.
The callable should take message, category and stacklevel. The return
value is ignored.
"""
global warn
warn = newMethod
def _getDeprecationDocstring(version, replacement=None):
"""
Generate an addition to a deprecated object's docstring that explains its
deprecation.
@param version: the version it was deprecated.
@type version: L{Version}
@param replacement: The replacement, if specified.
@type replacement: C{str} or callable
@return: a string like "Deprecated in Twisted 27.2.0; please use
twisted.timestream.tachyon.flux instead."
"""
doc = "Deprecated in %s" % (version,)
if replacement:
doc = "%s; %s" % (doc, _getReplacementString(replacement))
return doc + "."
def _getReplacementString(replacement):
"""
Surround a replacement for a deprecated API with some polite text exhorting
the user to consider it as an alternative.
@type replacement: C{str} or callable
@return: a string like "please use twisted.python.modules.getModule
instead".
"""
if callable(replacement):
replacement = _fullyQualifiedName(replacement)
return "please use %s instead" % (replacement,)
def _getDeprecationWarningString(fqpn, version, format=None, replacement=None):
"""
Return a string indicating that the Python name was deprecated in the given
version.
@param fqpn: Fully qualified Python name of the thing being deprecated
@type fqpn: C{str}
@param version: Version that C{fqpn} was deprecated in.
@type version: L{twisted.python.versions.Version}
@param format: A user-provided format to interpolate warning values into, or
L{DEPRECATION_WARNING_FORMAT
<twisted.python.deprecate.DEPRECATION_WARNING_FORMAT>} if C{None} is
given.
@type format: C{str}
@param replacement: what should be used in place of C{fqpn}. Either pass in
a string, which will be inserted into the warning message, or a
callable, which will be expanded to its full import path.
@type replacement: C{str} or callable
@return: A textual description of the deprecation
@rtype: C{str}
"""
if format is None:
format = DEPRECATION_WARNING_FORMAT
warningString = format % {
'fqpn': fqpn,
'version': version}
if replacement:
warningString = "%s; %s" % (
warningString, _getReplacementString(replacement))
return warningString
def getDeprecationWarningString(callableThing, version, format=None,
replacement=None):
"""
Return a string indicating that the callable was deprecated in the given
version.
@type callableThing: C{callable}
@param callableThing: Callable object to be deprecated
@type version: L{twisted.python.versions.Version}
@param version: Version that C{callableThing} was deprecated in
@type format: C{str}
@param format: A user-provided format to interpolate warning values into,
or L{DEPRECATION_WARNING_FORMAT
<twisted.python.deprecate.DEPRECATION_WARNING_FORMAT>} if C{None} is
given
@param replacement: what should be used in place of the callable. Either
pass in a string, which will be inserted into the warning message,
or a callable, which will be expanded to its full import path.
@type replacement: C{str} or callable
@return: A string describing the deprecation.
@rtype: C{str}
"""
return _getDeprecationWarningString(
_fullyQualifiedName(callableThing), version, format, replacement)
def deprecated_module(name, version, replacement):
message = _getDeprecationWarningString(name, version, DEPRECATION_WARNING_FORMAT + ': ' + replacement)
warn(message, DeprecationWarning, stacklevel=2)
def deprecated(version, replacement=None):
"""
Return a decorator that marks callables as deprecated.
@type version: L{twisted.python.versions.Version}
@param version: The version in which the callable will be marked as
having been deprecated. The decorated function will be annotated
with this version, having it set as its C{deprecatedVersion}
attribute.
@param replacement: what should be used in place of the callable. Either
pass in a string, which will be inserted into the warning message,
or a callable, which will be expanded to its full import path.
@type replacement: C{str} or callable
"""
def deprecationDecorator(function):
"""
Decorator that marks C{function} as deprecated.
"""
warningString = getDeprecationWarningString(
function, version, None, replacement)
def deprecatedFunction(*args, **kwargs):
warn(
warningString,
DeprecationWarning,
stacklevel=2)
return function(*args, **kwargs)
deprecatedFunction = mergeFunctionMetadata(
function, deprecatedFunction)
_appendToDocstring(deprecatedFunction,
_getDeprecationDocstring(version, replacement))
deprecatedFunction.deprecatedVersion = version
return deprecatedFunction
return deprecationDecorator
def _appendToDocstring(thingWithDoc, textToAppend):
"""
Append the given text to the docstring of C{thingWithDoc}.
If C{thingWithDoc} has no docstring, then the text just replaces the
docstring. If it has a single-line docstring then it appends a blank line
and the message text. If it has a multi-line docstring, then it appends a
blank line and the message text, and also does the indentation correctly.
"""
if thingWithDoc.__doc__:
docstringLines = thingWithDoc.__doc__.splitlines()
else:
docstringLines = []
if len(docstringLines) == 0:
docstringLines.append(textToAppend)
elif len(docstringLines) == 1:
docstringLines.extend(['', textToAppend, ''])
else:
spaces = docstringLines.pop()
docstringLines.extend(['',
spaces + textToAppend,
spaces])
thingWithDoc.__doc__ = '\n'.join(docstringLines)
class _ModuleProxy(object):
"""
Python module wrapper to hook module-level attribute access.
Access to deprecated attributes first checks
L{_ModuleProxy._deprecatedAttributes}, if the attribute does not appear
there then access falls through to L{_ModuleProxy._module}, the wrapped
module object.
@type _module: C{module}
@ivar _module: Module on which to hook attribute access.
@type _deprecatedAttributes: C{dict} mapping C{str} to
L{_DeprecatedAttribute}
@ivar _deprecatedAttributes: Mapping of attribute names to objects that
retrieve the module attribute's original value.
"""
def __init__(self, module):
object.__setattr__(self, '_module', module)
object.__setattr__(self, '_deprecatedAttributes', {})
def __repr__(self):
"""
Get a string containing the type of the module proxy and a
representation of the wrapped module object.
"""
_module = object.__getattribute__(self, '_module')
return '<%s module=%r>' % (
type(self).__name__,
_module)
def __setattr__(self, name, value):
"""
Set an attribute on the wrapped module object.
"""
_module = object.__getattribute__(self, '_module')
setattr(_module, name, value)
def __getattribute__(self, name):
"""
Get an attribute on the wrapped module object.
If the specified name has been deprecated then a warning is issued.
"""
_module = object.__getattribute__(self, '_module')
_deprecatedAttributes = object.__getattribute__(
self, '_deprecatedAttributes')
getter = _deprecatedAttributes.get(name)
if getter is not None:
value = getter.get()
else:
value = getattr(_module, name)
return value
class _DeprecatedAttribute(object):
"""
Wrapper for deprecated attributes.
This is intended to be used by L{_ModuleProxy}. Calling
L{_DeprecatedAttribute.get} will issue a warning and retrieve the
underlying attribute's value.
@type module: C{module}
@ivar module: The original module instance containing this attribute
@type fqpn: C{str}
@ivar fqpn: Fully qualified Python name for the deprecated attribute
@type version: L{twisted.python.versions.Version}
@ivar version: Version that the attribute was deprecated in
@type message: C{str}
@ivar message: Deprecation message
"""
def __init__(self, module, name, version, message):
"""
Initialise a deprecated name wrapper.
"""
self.module = module
self.__name__ = name
self.fqpn = module.__name__ + '.' + name
self.version = version
self.message = message
def get(self):
"""
Get the underlying attribute value and issue a deprecation warning.
"""
# This might fail if the deprecated thing is a module inside a package.
# In that case, don't emit the warning this time. The import system
# will come back again when it's not an AttributeError and we can emit
# the warning then.
result = getattr(self.module, self.__name__)
message = _getDeprecationWarningString(self.fqpn, self.version,
DEPRECATION_WARNING_FORMAT + ': ' + self.message)
warn(message, DeprecationWarning, stacklevel=3)
return result
def _deprecateAttribute(proxy, name, version, message):
"""
Mark a module-level attribute as being deprecated.
@type proxy: L{_ModuleProxy}
@param proxy: The module proxy instance proxying the deprecated attributes
@type name: C{str}
@param name: Attribute name
@type version: L{twisted.python.versions.Version}
@param version: Version that the attribute was deprecated in
@type message: C{str}
@param message: Deprecation message
"""
_module = object.__getattribute__(proxy, '_module')
attr = _DeprecatedAttribute(_module, name, version, message)
# Add a deprecated attribute marker for this module's attribute. When this
# attribute is accessed via _ModuleProxy a warning is emitted.
_deprecatedAttributes = object.__getattribute__(
proxy, '_deprecatedAttributes')
_deprecatedAttributes[name] = attr
def deprecatedModuleAttribute(version, message, moduleName, name):
"""
Declare a module-level attribute as being deprecated.
@type version: L{twisted.python.versions.Version}
@param version: Version that the attribute was deprecated in
@type message: C{str}
@param message: Deprecation message
@type moduleName: C{str}
@param moduleName: Fully-qualified Python name of the module containing
the deprecated attribute; if called from the same module as the
attributes are being deprecated in, using the C{__name__} global can
be helpful
@type name: C{str}
@param name: Attribute name to deprecate
"""
module = sys.modules[moduleName]
if not isinstance(module, _ModuleProxy):
module = _ModuleProxy(module)
sys.modules[moduleName] = module
_deprecateAttribute(module, name, version, message)
def warnAboutFunction(offender, warningString):
"""
Issue a warning string, identifying C{offender} as the responsible code.
This function is used to deprecate some behavior of a function. It differs
from L{warnings.warn} in that it is not limited to deprecating the behavior
of a function currently on the call stack.
@param offender: The function that is being deprecated.
@param warningString: The string that should be emitted by this warning.
@type warningString: C{str}
@since: 11.0
"""
# inspect.getmodule() is attractive, but somewhat
# broken in Python < 2.6. See Python bug 4845.
offenderModule = sys.modules[offender.__module__]
filename = inspect.getabsfile(offenderModule)
lineStarts = list(findlinestarts(offender.func_code))
lastLineNo = lineStarts[-1][1]
globals = offender.func_globals
kwargs = dict(
category=DeprecationWarning,
filename=filename,
lineno=lastLineNo,
module=offenderModule.__name__,
registry=globals.setdefault("__warningregistry__", {}),
module_globals=None)
if sys.version_info[:2] < (2, 5):
kwargs.pop('module_globals')
warn_explicit(warningString, **kwargs)
# -*- test-case-name: twisted.python.test.test_deprecate -*-
# Copyright (c) Twisted Matrix Laboratories.
# Copyright (c) 2001-2011
# Allen Short
# Andy Gayton
# Andrew Bennetts
# Antoine Pitrou
# Apple Computer, Inc.
# Benjamin Bruheim
# Bob Ippolito
# Canonical Limited
# Christopher Armstrong
# David Reid
# Donovan Preston
# Eric Mangold
# Eyal Lotem
# Itamar Shtull-Trauring
# James Knight
# Jason A. Mobarak
# Jean-Paul Calderone
# Jessica McKellar
# Jonathan Jacobs
# Jonathan Lange
# Jonathan D. Simms
# Jargen Hermann
# Kevin Horn
# Kevin Turner
# Mary Gardiner
# Matthew Lefkowitz
# Massachusetts Institute of Technology
# Moshe Zadka
# Paul Swartz
# Pavel Pergamenshchik
# Ralph Meijer
# Sean Riley
# Software Freedom Conservancy
# Travis B. Hartwell
# Thijs Triemstra
# Thomas Herve
# Timothy Allen
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
bsd-3-clause
|
Insolita/creepy
|
creepy/ui/FilterLocationsPointDialog.py
|
7
|
6011
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\filterLocationsPointDialog.ui'
#
# Created: Fri Jan 31 15:33:25 2014
# by: PyQt4 UI code generator 4.9.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_FilteLocationsPointDialog(object):
def setupUi(self, FilteLocationsPointDialog):
FilteLocationsPointDialog.setObjectName(_fromUtf8("FilteLocationsPointDialog"))
FilteLocationsPointDialog.resize(758, 565)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/creepy/marker")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
FilteLocationsPointDialog.setWindowIcon(icon)
self.buttonBox = QtGui.QDialogButtonBox(FilteLocationsPointDialog)
self.buttonBox.setGeometry(QtCore.QRect(390, 520, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayoutWidget = QtGui.QWidget(FilteLocationsPointDialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 731, 501))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.containerLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.containerLayout.setMargin(0)
self.containerLayout.setObjectName(_fromUtf8("containerLayout"))
self.titleLabel = QtGui.QLabel(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.titleLabel.sizePolicy().hasHeightForWidth())
self.titleLabel.setSizePolicy(sizePolicy)
self.titleLabel.setTextFormat(QtCore.Qt.RichText)
self.titleLabel.setObjectName(_fromUtf8("titleLabel"))
self.containerLayout.addWidget(self.titleLabel)
self.webView = QtWebKit.QWebView(self.verticalLayoutWidget)
self.webView.setUrl(QtCore.QUrl(_fromUtf8("about:blank")))
self.webView.setObjectName(_fromUtf8("webView"))
self.containerLayout.addWidget(self.webView)
self.controlsContainerLayout = QtGui.QHBoxLayout()
self.controlsContainerLayout.setObjectName(_fromUtf8("controlsContainerLayout"))
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.controlsContainerLayout.addItem(spacerItem)
self.radiusLabel = QtGui.QLabel(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radiusLabel.sizePolicy().hasHeightForWidth())
self.radiusLabel.setSizePolicy(sizePolicy)
self.radiusLabel.setTextFormat(QtCore.Qt.RichText)
self.radiusLabel.setObjectName(_fromUtf8("radiusLabel"))
self.controlsContainerLayout.addWidget(self.radiusLabel)
self.radiusSpinBox = QtGui.QSpinBox(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radiusSpinBox.sizePolicy().hasHeightForWidth())
self.radiusSpinBox.setSizePolicy(sizePolicy)
self.radiusSpinBox.setMaximum(1000)
self.radiusSpinBox.setObjectName(_fromUtf8("radiusSpinBox"))
self.controlsContainerLayout.addWidget(self.radiusSpinBox)
self.radiusUnitComboBox = QtGui.QComboBox(self.verticalLayoutWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.radiusUnitComboBox.sizePolicy().hasHeightForWidth())
self.radiusUnitComboBox.setSizePolicy(sizePolicy)
self.radiusUnitComboBox.setObjectName(_fromUtf8("radiusUnitComboBox"))
self.controlsContainerLayout.addWidget(self.radiusUnitComboBox)
self.containerLayout.addLayout(self.controlsContainerLayout)
self.retranslateUi(FilteLocationsPointDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), FilteLocationsPointDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), FilteLocationsPointDialog.reject)
QtCore.QMetaObject.connectSlotsByName(FilteLocationsPointDialog)
def retranslateUi(self, FilteLocationsPointDialog):
FilteLocationsPointDialog.setWindowTitle(QtGui.QApplication.translate("FilteLocationsPointDialog", "Filter Locations By Place", None, QtGui.QApplication.UnicodeUTF8))
self.titleLabel.setText(QtGui.QApplication.translate("FilteLocationsPointDialog", "<html><head/><body><p><span style=\" font-size:9pt;\">Drop a </span><span style=\" font-size:9pt; font-weight:600; color:#ff0000;\">pin</span><span style=\" font-size:9pt;\"> on the map for your point of interest</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.radiusLabel.setText(QtGui.QApplication.translate("FilteLocationsPointDialog", "<html><head/><body><p><span style=\" font-size:9pt;\">Distance from the POI :</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
from PyQt4 import QtWebKit
import creepy_resources_rc
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
FilteLocationsPointDialog = QtGui.QDialog()
ui = Ui_FilteLocationsPointDialog()
ui.setupUi(FilteLocationsPointDialog)
FilteLocationsPointDialog.show()
sys.exit(app.exec_())
|
gpl-3.0
|
jardasgit/libvirt
|
tests/cputestdata/cpu-cpuid.py
|
3
|
6216
|
#!/usr/bin/env python3
import os
import sys
import json
import xmltodict
def checkCPUIDFeature(cpuData, feature):
eax_in = feature["eax_in"]
ecx_in = feature["ecx_in"]
eax = feature["eax"]
ebx = feature["ebx"]
ecx = feature["ecx"]
edx = feature["edx"]
if "cpuid" not in cpuData:
return False
cpuid = cpuData["cpuid"]
if eax_in not in cpuid or ecx_in not in cpuid[eax_in]:
return False
leaf = cpuid[eax_in][ecx_in]
return ((eax > 0 and leaf["eax"] & eax == eax) or
(ebx > 0 and leaf["ebx"] & ebx == ebx) or
(ecx > 0 and leaf["ecx"] & ecx == ecx) or
(edx > 0 and leaf["edx"] & edx == edx))
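# Illustrative sketch (added): a feature record as consumed above, e.g.
# SSE2, which lives in CPUID leaf 1 (eax_in=1, ecx_in=0), EDX bit 26:
#
#   feature = {"type": "cpuid", "eax_in": 1, "ecx_in": 0,
#              "eax": 0, "ebx": 0, "ecx": 0, "edx": 1 << 26}
#   checkCPUIDFeature(cpuData, feature)
#
# The check succeeds when every requested bit is present in the captured
# leaf (mask & value == mask) for at least one of the four registers.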
def checkMSRFeature(cpuData, feature):
index = feature["index"]
edx = feature["edx"]
eax = feature["eax"]
if "msr" not in cpuData:
return False
msr = cpuData["msr"]
if index not in msr:
return False
msr = msr[index]
return ((edx > 0 and msr["edx"] & edx == edx) or
(eax > 0 and msr["eax"] & eax == eax))
def checkFeature(cpuData, feature):
if feature["type"] == "cpuid":
return checkCPUIDFeature(cpuData, feature)
if feature["type"] == "msr":
return checkMSRFeature(cpuData, feature)
def addCPUIDFeature(cpuData, feature):
if "cpuid" not in cpuData:
cpuData["cpuid"] = {}
cpuid = cpuData["cpuid"]
if feature["eax_in"] not in cpuid:
cpuid[feature["eax_in"]] = {}
leaf = cpuid[feature["eax_in"]]
if feature["ecx_in"] not in leaf:
leaf[feature["ecx_in"]] = {"eax": 0, "ebx": 0, "ecx": 0, "edx": 0}
leaf = leaf[feature["ecx_in"]]
for reg in ["eax", "ebx", "ecx", "edx"]:
leaf[reg] |= feature[reg]
def addMSRFeature(cpuData, feature):
if "msr" not in cpuData:
cpuData["msr"] = {}
msr = cpuData["msr"]
if feature["index"] not in msr:
msr[feature["index"]] = {"edx": 0, "eax": 0}
msr = msr[feature["index"]]
for reg in ["edx", "eax"]:
msr[reg] |= feature[reg]
def addFeature(cpuData, feature):
if feature["type"] == "cpuid":
addCPUIDFeature(cpuData, feature)
elif feature["type"] == "msr":
addMSRFeature(cpuData, feature)
def parseQemu(path, features):
cpuData = {}
with open(path, "r") as f:
data, pos = json.JSONDecoder().raw_decode(f.read())
for (prop, val) in data["return"]["model"]["props"].items():
if val and prop in features:
addFeature(cpuData, features[prop])
return cpuData
def parseCPUData(path):
cpuData = {}
with open(path, "rb") as f:
data = xmltodict.parse(f)
for leaf in data["cpudata"]["cpuid"]:
feature = {"type": "cpuid"}
feature["eax_in"] = int(leaf["@eax_in"], 0)
feature["ecx_in"] = int(leaf["@ecx_in"], 0)
for reg in ["eax", "ebx", "ecx", "edx"]:
feature[reg] = int(leaf["@" + reg], 0)
addFeature(cpuData, feature)
if "msr" in data["cpudata"]:
if not isinstance(data["cpudata"]["msr"], list):
data["cpudata"]["msr"] = [data["cpudata"]["msr"]]
for msr in data["cpudata"]["msr"]:
feature = {"type": "msr"}
feature["index"] = int(msr["@index"], 0)
feature["edx"] = int(msr["@edx"], 0)
feature["eax"] = int(msr["@eax"], 0)
addFeature(cpuData, feature)
return cpuData
def parseMapFeature(fType, data):
ret = {"type": fType}
if fType == "cpuid":
fields = ["eax_in", "ecx_in", "eax", "ebx", "ecx", "edx"]
elif fType == "msr":
fields = ["index", "edx", "eax"]
for field in fields:
attr = "@%s" % field
if attr in data:
ret[field] = int(data[attr], 0)
else:
ret[field] = 0
return ret
def parseMap():
path = os.path.dirname(sys.argv[0])
path = os.path.join(path, "..", "..", "src", "cpu_map", "x86_features.xml")
with open(path, "rb") as f:
data = xmltodict.parse(f)
cpuMap = {}
for feature in data["cpus"]["feature"]:
for fType in ["cpuid", "msr"]:
if fType in feature:
cpuMap[feature["@name"]] = parseMapFeature(fType, feature[fType])
return cpuMap
def formatCPUData(cpuData, path, comment):
print(path)
with open(path, "w") as f:
f.write("<!-- " + comment + " -->\n")
f.write("<cpudata arch='x86'>\n")
cpuid = cpuData["cpuid"]
for eax_in in sorted(cpuid.keys()):
for ecx_in in sorted(cpuid[eax_in].keys()):
leaf = cpuid[eax_in][ecx_in]
line = (" <cpuid eax_in='0x%08x' ecx_in='0x%02x' "
"eax='0x%08x' ebx='0x%08x' "
"ecx='0x%08x' edx='0x%08x'/>\n")
f.write(line % (
eax_in, ecx_in,
leaf["eax"], leaf["ebx"], leaf["ecx"], leaf["edx"]))
if "msr" in cpuData:
msr = cpuData["msr"]
for index in sorted(msr.keys()):
f.write(" <msr index='0x%x' edx='0x%08x' eax='0x%08x'/>\n" %
(index, msr[index]['edx'], msr[index]['eax']))
f.write("</cpudata>\n")
def diff(cpuMap, path):
base = path.replace(".json", "")
jsonFile = path
cpuDataFile = base + ".xml"
enabledFile = base + "-enabled.xml"
disabledFile = base + "-disabled.xml"
cpuData = parseCPUData(cpuDataFile)
qemu = parseQemu(jsonFile, cpuMap)
enabled = {"cpuid": {}}
disabled = {"cpuid": {}}
for feature in cpuMap.values():
if checkFeature(qemu, feature):
addFeature(enabled, feature)
elif checkFeature(cpuData, feature):
addFeature(disabled, feature)
formatCPUData(enabled, enabledFile, "Features enabled by QEMU")
formatCPUData(disabled, disabledFile, "Features disabled by QEMU")
if len(sys.argv) < 3:
print("Usage: %s diff json_file..." % sys.argv[0])
sys.exit(1)
action = sys.argv[1]
args = sys.argv[2:]
if action == "diff":
cpuMap = parseMap()
for path in args:
diff(cpuMap, path)
else:
print("Unknown action: %s" % action)
sys.exit(1)
|
lgpl-2.1
|
aESeguridad/GERE
|
venv/local/lib/python2.7/encodings/iso8859_9.py
|
593
|
13412
|
""" Python Character Mapping Codec iso8859_9 generated from 'MAPPINGS/ISO8859/8859-9.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-9',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
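# Usage sketch (illustrative): once found through the encodings package,
# the codec behaves like any other charmap codec (Python 2 semantics):
#
#   u'\u011e'.encode('iso8859-9')   # -> '\xd0' (G WITH BREVE maps to 0xD0)
#   '\xd0'.decode('iso8859-9')      # -> u'\u011e'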
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
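# A minimal sketch (not part of the generated table) showing how these tables
# are consumed; it assumes the usual "import codecs" at the top of this module.
if __name__ == '__main__':
    raw = b'\xd0\xfd'  # 0xD0 -> G WITH BREVE, 0xFD -> DOTLESS I per the table
    text, _ = codecs.charmap_decode(raw, 'strict', decoding_table)
    data, _ = codecs.charmap_encode(text, 'strict', encoding_table)
    assert data == raw  # round-trips through decoding_table/encoding_table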
|
gpl-3.0
|
luismmontielg/pyplotter
|
setup.py
|
1
|
1841
|
#-*- coding: utf-8 -*-
from pyplotter import __version__
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import subprocess
def convert(source, from_format, to_format):
p = subprocess.Popen(['pandoc', '--from=' + from_format, '--to=' + to_format],
stdin=subprocess.PIPE, stdout=subprocess.PIPE
)
if sys.version_info[0] == 3:
return p.communicate(bytes(source, 'UTF-8'))[0]
return p.communicate(source)[0]
    with open('README.md') as f:  # close the file deterministically
        readme = f.read()
long_description = convert(readme, 'markdown', 'rst')
except (OSError, IOError, ImportError) as e:
try:
long_description = open('README.md').read()
    except IOError:
long_description = ''
dependencies = ['docopt>=0.6.1',]
def publish():
os.system("python setup.py sdist upload")
if sys.argv[-1] == "publish":
publish()
sys.exit()
setup(
name='pyplotter',
version=".".join(str(x) for x in __version__),
description='Command line bar graphs',
long_description=long_description,
url='http://www.github.com/luismmontielg/pyplotter',
license="MIT License",
author='Luis Montiel',
author_email='[email protected]',
install_requires=dependencies,
packages=['pyplotter'],
entry_points={
'console_scripts': [
'pyplotter=pyplotter.main:run'
],
},
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
),
)
|
mit
|
jcrist/pydy
|
examples/Kane1985/Chapter6/Ex11.12_11.13.py
|
8
|
2718
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercises 11.12, 11.13 from Kane 1985."""
from __future__ import division
from sympy import pi, solve, symbols, trigsimp
from sympy.physics.mechanics import ReferenceFrame, RigidBody, Point
from sympy.physics.mechanics import dot, dynamicsymbols, inertia, msprint
from util import generalized_active_forces_K
from util import lagrange_equations, subs
g, m, R = symbols('g, m R')
q1, q2, q3, q4, q5 = q = dynamicsymbols('q1:6')
q1d, q2d, q3d, q4d, q5d = qd = dynamicsymbols('q1:6', level=1)
u1, u2, u3, u4, u5 = u = dynamicsymbols('u1:6')
# reference frames
A = ReferenceFrame('A')
B_prime = A.orientnew('B_prime', 'Axis', [q1, A.z])
B = B_prime.orientnew('B', 'Axis', [pi/2 - q2, B_prime.x])
C = B.orientnew('C', 'Axis', [q3, B.z])
# points, velocities
pO = Point('O')
pO.set_vel(A, 0)
# R is the point in plane H that comes into contact with disk C.
pR = pO.locatenew('R', q4*A.x + q5*A.y)
pR.set_vel(A, pR.pos_from(pO).dt(A))
pR.set_vel(B, 0)
# C^ is the point in disk C that comes into contact with plane H.
pC_hat = pR.locatenew('C^', 0)
pC_hat.set_vel(C, 0)
# C* is the point at the center of disk C.
pC_star = pC_hat.locatenew('C*', R*B.y)
pC_star.set_vel(C, 0)
pC_star.set_vel(B, 0)
# calculate velocities in A
pC_star.v2pt_theory(pR, A, B)
pC_hat.v2pt_theory(pC_star, A, C)
# kinematic differential equations
#kde = [dot(C.ang_vel_in(A), x) - y for x, y in zip(B, u[:3])]
#kde += [x - y for x, y in zip(qd[3:], u[3:])]
#kde_map = solve(kde, qd)
kde = [x - y for x, y in zip(u, qd)]
kde_map = solve(kde, qd)
vc = [dot(pC_hat.vel(A), x) for x in [A.x, A.y]]
vc_map = solve(subs(vc, kde_map), [u4, u5])
# define disc rigidbody
IC = inertia(C, m*R**2/4, m*R**2/4, m*R**2/2)
rbC = RigidBody('rbC', pC_star, C, m, (IC, pC_star))
rbC.set_potential_energy(m*g*dot(pC_star.pos_from(pR), A.z))
# potential energy
V = rbC.potential_energy
print('V = {0}'.format(msprint(V)))
# kinetic energy
K = trigsimp(rbC.kinetic_energy(A).subs(kde_map).subs(vc_map))
print('K = {0}'.format(msprint(K)))
u_indep = [u1, u2, u3]
Fr = generalized_active_forces_K(K, q, u_indep, kde_map, vc_map)
# Fr + Fr* = 0 but the dynamical equations cannot be formulated by only
# kinetic energy as Fr = -Fr* for r = 1, ..., p
print('\ngeneralized active forces, Fr')
for i, x in enumerate(Fr, 1):
print('F{0} = {1}'.format(i, msprint(x)))
L = K - V
le = lagrange_equations(L, q, u, kde_map)
print('\nLagrange\'s equations of the second kind')
for i, x in enumerate(le, 1):
print('eq{0}: {1} = 0'.format(i, msprint(x)))
ud = [x.diff(symbols('t')) for x in u]
de_map = solve(le, ud)
for k, v in de_map.items():
print('{0} = {1}'.format(msprint(k), msprint(v)))
|
bsd-3-clause
|
jomo/youtube-dl
|
youtube_dl/extractor/nrk.py
|
6
|
10646
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
parse_duration,
unified_strdate,
)
class NRKIE(InfoExtractor):
_VALID_URL = r'(?:nrk:|http://(?:www\.)?nrk\.no/video/PS\*)(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.nrk.no/video/PS*150533',
'md5': 'bccd850baebefe23b56d708a113229c2',
'info_dict': {
'id': '150533',
'ext': 'flv',
'title': 'Dompap og andre fugler i Piip-Show',
'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f',
'duration': 263,
}
},
{
'url': 'http://www.nrk.no/video/PS*154915',
'md5': '0b1493ba1aae7d9579a5ad5531bc395a',
'info_dict': {
'id': '154915',
'ext': 'flv',
'title': 'Slik høres internett ut når du er blind',
'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
'duration': 20,
}
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
data = self._download_json(
'http://v8.psapi.nrk.no/mediaelement/%s' % video_id,
video_id, 'Downloading media JSON')
if data['usageRights']['isGeoBlocked']:
raise ExtractorError(
                'NRK har ikke rettigheter til å vise dette programmet utenfor Norge',
expected=True)
video_url = data['mediaUrl'] + '?hdcore=3.5.0&plugin=aasp-3.5.0.151.81'
duration = parse_duration(data.get('duration'))
images = data.get('images')
if images:
thumbnails = images['webImages']
thumbnails.sort(key=lambda image: image['pixelWidth'])
thumbnail = thumbnails[-1]['imageUrl']
else:
thumbnail = None
return {
'id': video_id,
'url': video_url,
'ext': 'flv',
'title': data['title'],
'description': data['description'],
'duration': duration,
'thumbnail': thumbnail,
}
class NRKPlaylistIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?nrk\.no/(?!video)(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763',
'info_dict': {
'id': 'gjenopplev-den-historiske-solformorkelsen-1.12270763',
'title': 'Gjenopplev den historiske solformørkelsen',
'description': 'md5:c2df8ea3bac5654a26fc2834a542feed',
},
'playlist_count': 2,
}, {
'url': 'http://www.nrk.no/kultur/bok/rivertonprisen-til-karin-fossum-1.12266449',
'info_dict': {
'id': 'rivertonprisen-til-karin-fossum-1.12266449',
'title': 'Rivertonprisen til Karin Fossum',
'description': 'Første kvinne på 15 år til å vinne krimlitteraturprisen.',
},
'playlist_count': 5,
}]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result('nrk:%s' % video_id, 'NRK')
for video_id in re.findall(
r'class="[^"]*\brich\b[^"]*"[^>]+data-video-id="([^"]+)"',
webpage)
]
playlist_title = self._og_search_title(webpage)
playlist_description = self._og_search_description(webpage)
return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description)
class NRKTVIE(InfoExtractor):
_VALID_URL = r'(?P<baseurl>http://tv\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?'
_TESTS = [
{
'url': 'http://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
'md5': 'adf2c5454fa2bf032f47a9f8fb351342',
'info_dict': {
'id': 'MUHH48000314',
'ext': 'flv',
'title': '20 spørsmål',
'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
'upload_date': '20140523',
'duration': 1741.52,
},
},
{
'url': 'http://tv.nrk.no/program/mdfp15000514',
'md5': '383650ece2b25ecec996ad7b5bb2a384',
'info_dict': {
'id': 'mdfp15000514',
'ext': 'flv',
'title': 'Kunnskapskanalen: Grunnlovsjubiléet - Stor ståhei for ingenting',
'description': 'md5:654c12511f035aed1e42bdf5db3b206a',
'upload_date': '20140524',
'duration': 4605.0,
},
},
{
# single playlist video
'url': 'http://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
'md5': 'adbd1dbd813edaf532b0a253780719c2',
'info_dict': {
'id': 'MSPO40010515-part2',
'ext': 'flv',
'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
'upload_date': '20150106',
},
'skip': 'Only works from Norway',
},
{
'url': 'http://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
'playlist': [
{
'md5': '9480285eff92d64f06e02a5367970a7a',
'info_dict': {
'id': 'MSPO40010515-part1',
'ext': 'flv',
'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 1:2)',
'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
'upload_date': '20150106',
},
},
{
'md5': 'adbd1dbd813edaf532b0a253780719c2',
'info_dict': {
'id': 'MSPO40010515-part2',
'ext': 'flv',
'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
'upload_date': '20150106',
},
},
],
'info_dict': {
'id': 'MSPO40010515',
'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn',
'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
'upload_date': '20150106',
'duration': 6947.5199999999995,
},
'skip': 'Only works from Norway',
}
]
def _debug_print(self, txt):
if self._downloader.params.get('verbose', False):
self.to_screen('[debug] %s' % txt)
def _get_subtitles(self, subtitlesurl, video_id, baseurl):
url = "%s%s" % (baseurl, subtitlesurl)
self._debug_print('%s: Subtitle url: %s' % (video_id, url))
captions = self._download_xml(
url, video_id, 'Downloading subtitles')
lang = captions.get('lang', 'no')
return {lang: [
{'ext': 'ttml', 'url': url},
]}
def _extract_f4m(self, manifest_url, video_id):
return self._extract_f4m_formats(manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
part_id = mobj.group('part_id')
baseurl = mobj.group('baseurl')
webpage = self._download_webpage(url, video_id)
title = self._html_search_meta(
'title', webpage, 'title')
description = self._html_search_meta(
'description', webpage, 'description')
thumbnail = self._html_search_regex(
r'data-posterimage="([^"]+)"',
webpage, 'thumbnail', fatal=False)
upload_date = unified_strdate(self._html_search_meta(
'rightsfrom', webpage, 'upload date', fatal=False))
duration = float_or_none(self._html_search_regex(
r'data-duration="([^"]+)"',
webpage, 'duration', fatal=False))
# playlist
parts = re.findall(
r'<a href="#del=(\d+)"[^>]+data-argument="([^"]+)">([^<]+)</a>', webpage)
if parts:
entries = []
for current_part_id, stream_url, part_title in parts:
if part_id and current_part_id != part_id:
continue
video_part_id = '%s-part%s' % (video_id, current_part_id)
formats = self._extract_f4m(stream_url, video_part_id)
entries.append({
'id': video_part_id,
'title': part_title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'formats': formats,
})
if part_id:
if entries:
return entries[0]
else:
playlist = self.playlist_result(entries, video_id, title, description)
playlist.update({
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
})
return playlist
formats = []
f4m_url = re.search(r'data-media="([^"]+)"', webpage)
if f4m_url:
formats.extend(self._extract_f4m(f4m_url.group(1), video_id))
m3u8_url = re.search(r'data-hls-media="([^"]+)"', webpage)
if m3u8_url:
formats.extend(self._extract_m3u8_formats(m3u8_url.group(1), video_id, 'mp4'))
self._sort_formats(formats)
subtitles_url = self._html_search_regex(
r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"',
webpage, 'subtitle URL', default=None)
subtitles = None
if subtitles_url:
subtitles = self.extract_subtitles(subtitles_url, video_id, baseurl)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
|
unlicense
|
0x46616c6b/ansible
|
lib/ansible/modules/network/snmp_facts.py
|
11
|
13233
|
#!/usr/bin/python
# This file is part of Networklore's snmp library for Ansible
#
# The module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: snmp_facts
version_added: "1.9"
author: "Patrick Ogenstad (@ogenstad)"
short_description: Retrieve facts for a device using SNMP.
description:
- Retrieve facts for a device using SNMP, the facts will be
inserted to the ansible_facts key.
requirements:
- pysnmp
options:
host:
description:
- Set to target snmp server (normally {{inventory_hostname}})
required: true
version:
description:
- SNMP Version to use, v2/v2c or v3
choices: [ 'v2', 'v2c', 'v3' ]
required: true
community:
description:
- The SNMP community string, required if version is v2/v2c
required: false
level:
description:
- Authentication level, required if version is v3
choices: [ 'authPriv', 'authNoPriv' ]
required: false
username:
description:
- Username for SNMPv3, required if version is v3
required: false
integrity:
description:
- Hashing algorithm, required if version is v3
choices: [ 'md5', 'sha' ]
required: false
authkey:
description:
- Authentication key, required if version is v3
required: false
privacy:
description:
- Encryption algorithm, required if level is authPriv
choices: [ 'des', 'aes' ]
required: false
privkey:
description:
      - Encryption key, required if level is authPriv
required: false
'''
EXAMPLES = '''
# Gather facts with SNMP version 2
- snmp_facts:
host: '{{ inventory_hostname }}'
    version: v2c
community: public
  delegate_to: localhost
# Gather facts using SNMP version 3
- snmp_facts:
host: '{{ inventory_hostname }}'
version: v3
level: authPriv
integrity: sha
privacy: aes
username: snmp-user
authkey: abc12345
privkey: def6789
delegate_to: localhost
'''
import binascii
from ansible.module_utils.basic import *
from ansible.module_utils._text import to_text
from collections import defaultdict
try:
from pysnmp.entity.rfc3413.oneliner import cmdgen
has_pysnmp = True
except ImportError:
has_pysnmp = False
class DefineOid(object):
def __init__(self,dotprefix=False):
if dotprefix:
dp = "."
else:
dp = ""
# From SNMPv2-MIB
self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
self.sysName = dp + "1.3.6.1.2.1.1.5.0"
self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"
# From IF-MIB
self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
# From IP-MIB
self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"
def decode_hex(hexstring):
if len(hexstring) < 3:
return hexstring
if hexstring[:2] == "0x":
return to_text(binascii.unhexlify(hexstring[2:]))
else:
return hexstring
def decode_mac(hexstring):
if len(hexstring) != 14:
return hexstring
if hexstring[:2] == "0x":
return hexstring[2:]
else:
return hexstring
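# Hedged, illustrative examples of the two decoders above (pysnmp renders
# octet strings as "0x..." hex):
# decode_hex('0x48656c6c6f') -> 'Hello'
# decode_mac('0x001122334455') -> '001122334455' (14-char input, prefix dropped)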
def lookup_adminstatus(int_adminstatus):
adminstatus_options = {
1: 'up',
2: 'down',
3: 'testing'
}
if int_adminstatus in adminstatus_options:
return adminstatus_options[int_adminstatus]
else:
return ""
def lookup_operstatus(int_operstatus):
operstatus_options = {
1: 'up',
2: 'down',
3: 'testing',
4: 'unknown',
5: 'dormant',
6: 'notPresent',
7: 'lowerLayerDown'
}
if int_operstatus in operstatus_options:
return operstatus_options[int_operstatus]
else:
return ""
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True),
version=dict(required=True, choices=['v2', 'v2c', 'v3']),
community=dict(required=False, default=False),
username=dict(required=False),
level=dict(required=False, choices=['authNoPriv', 'authPriv']),
integrity=dict(required=False, choices=['md5', 'sha']),
privacy=dict(required=False, choices=['des', 'aes']),
authkey=dict(required=False),
privkey=dict(required=False),
removeplaceholder=dict(required=False)),
        required_together=(['username', 'level', 'integrity', 'authkey'],
                           ['privacy', 'privkey']),
supports_check_mode=False)
m_args = module.params
if not has_pysnmp:
module.fail_json(msg='Missing required pysnmp module (check docs)')
cmdGen = cmdgen.CommandGenerator()
# Verify that we receive a community when using snmp v2
if m_args['version'] == "v2" or m_args['version'] == "v2c":
if m_args['community'] is False:
module.fail_json(msg='Community not set when using snmp version 2')
if m_args['version'] == "v3":
if m_args['username'] is None:
module.fail_json(msg='Username not set when using snmp version 3')
if m_args['level'] == "authPriv" and m_args['privacy'] is None:
module.fail_json(msg='Privacy algorithm not set when using authPriv')
if m_args['integrity'] == "sha":
integrity_proto = cmdgen.usmHMACSHAAuthProtocol
elif m_args['integrity'] == "md5":
integrity_proto = cmdgen.usmHMACMD5AuthProtocol
if m_args['privacy'] == "aes":
privacy_proto = cmdgen.usmAesCfb128Protocol
elif m_args['privacy'] == "des":
privacy_proto = cmdgen.usmDESPrivProtocol
# Use SNMP Version 2
if m_args['version'] == "v2" or m_args['version'] == "v2c":
snmp_auth = cmdgen.CommunityData(m_args['community'])
# Use SNMP Version 3 with authNoPriv
elif m_args['level'] == "authNoPriv":
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
# Use SNMP Version 3 with authPriv
else:
snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)
# Use p to prefix OIDs with a dot for polling
p = DefineOid(dotprefix=True)
# Use v without a prefix to use with return values
v = DefineOid(dotprefix=False)
Tree = lambda: defaultdict(Tree)
results = Tree()
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
snmp_auth,
cmdgen.UdpTransportTarget((m_args['host'], 161)),
cmdgen.MibVariable(p.sysDescr,),
cmdgen.MibVariable(p.sysObjectId,),
cmdgen.MibVariable(p.sysUpTime,),
cmdgen.MibVariable(p.sysContact,),
cmdgen.MibVariable(p.sysName,),
cmdgen.MibVariable(p.sysLocation,),
lookupMib=False
)
if errorIndication:
module.fail_json(msg=str(errorIndication))
for oid, val in varBinds:
current_oid = oid.prettyPrint()
current_val = val.prettyPrint()
if current_oid == v.sysDescr:
results['ansible_sysdescr'] = decode_hex(current_val)
elif current_oid == v.sysObjectId:
results['ansible_sysobjectid'] = current_val
elif current_oid == v.sysUpTime:
results['ansible_sysuptime'] = current_val
elif current_oid == v.sysContact:
results['ansible_syscontact'] = current_val
elif current_oid == v.sysName:
results['ansible_sysname'] = current_val
elif current_oid == v.sysLocation:
results['ansible_syslocation'] = current_val
errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
snmp_auth,
cmdgen.UdpTransportTarget((m_args['host'], 161)),
cmdgen.MibVariable(p.ifIndex,),
cmdgen.MibVariable(p.ifDescr,),
cmdgen.MibVariable(p.ifMtu,),
cmdgen.MibVariable(p.ifSpeed,),
cmdgen.MibVariable(p.ifPhysAddress,),
cmdgen.MibVariable(p.ifAdminStatus,),
cmdgen.MibVariable(p.ifOperStatus,),
cmdgen.MibVariable(p.ipAdEntAddr,),
cmdgen.MibVariable(p.ipAdEntIfIndex,),
cmdgen.MibVariable(p.ipAdEntNetMask,),
cmdgen.MibVariable(p.ifAlias,),
lookupMib=False
)
if errorIndication:
module.fail_json(msg=str(errorIndication))
interface_indexes = []
all_ipv4_addresses = []
ipv4_networks = Tree()
for varBinds in varTable:
for oid, val in varBinds:
current_oid = oid.prettyPrint()
current_val = val.prettyPrint()
if v.ifIndex in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['ifindex'] = current_val
interface_indexes.append(ifIndex)
if v.ifDescr in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['name'] = current_val
if v.ifMtu in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['mtu'] = current_val
            if v.ifSpeed in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['ansible_interfaces'][ifIndex]['speed'] = current_val
if v.ifPhysAddress in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
if v.ifAdminStatus in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
if v.ifOperStatus in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
if v.ipAdEntAddr in current_oid:
curIPList = current_oid.rsplit('.', 4)[-4:]
curIP = ".".join(curIPList)
ipv4_networks[curIP]['address'] = current_val
all_ipv4_addresses.append(current_val)
if v.ipAdEntIfIndex in current_oid:
curIPList = current_oid.rsplit('.', 4)[-4:]
curIP = ".".join(curIPList)
ipv4_networks[curIP]['interface'] = current_val
if v.ipAdEntNetMask in current_oid:
curIPList = current_oid.rsplit('.', 4)[-4:]
curIP = ".".join(curIPList)
ipv4_networks[curIP]['netmask'] = current_val
if v.ifAlias in current_oid:
ifIndex = int(current_oid.rsplit('.', 1)[-1])
results['ansible_interfaces'][ifIndex]['description'] = current_val
interface_to_ipv4 = {}
for ipv4_network in ipv4_networks:
current_interface = ipv4_networks[ipv4_network]['interface']
current_network = {
'address': ipv4_networks[ipv4_network]['address'],
'netmask': ipv4_networks[ipv4_network]['netmask']
}
        if current_interface not in interface_to_ipv4:
            interface_to_ipv4[current_interface] = []
        interface_to_ipv4[current_interface].append(current_network)
for interface in interface_to_ipv4:
results['ansible_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]
results['ansible_all_ipv4_addresses'] = all_ipv4_addresses
module.exit_json(ansible_facts=results)
if __name__ == '__main__':
main()
|
gpl-3.0
|
OpenMined/PySyft
|
packages/syft/src/syft/proto/lib/tenseal/vector_pb2.py
|
1
|
4088
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/lib/tenseal/vector.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="proto/lib/tenseal/vector.proto",
package="syft.lib.tenseal",
syntax="proto3",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1eproto/lib/tenseal/vector.proto\x12\x10syft.lib.tenseal\x1a%proto/core/common/common_object.proto"T\n\rTenSEALVector\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x10\n\x08obj_type\x18\x02 \x01(\t\x12\x0e\n\x06vector\x18\x04 \x01(\x0c\x62\x06proto3',
dependencies=[
proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR,
],
)
_TENSEALVECTOR = _descriptor.Descriptor(
name="TenSEALVector",
full_name="syft.lib.tenseal.TenSEALVector",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.lib.tenseal.TenSEALVector.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="obj_type",
full_name="syft.lib.tenseal.TenSEALVector.obj_type",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="vector",
full_name="syft.lib.tenseal.TenSEALVector.vector",
index=2,
number=4,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"",
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=91,
serialized_end=175,
)
_TENSEALVECTOR.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
DESCRIPTOR.message_types_by_name["TenSEALVector"] = _TENSEALVECTOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TenSEALVector = _reflection.GeneratedProtocolMessageType(
"TenSEALVector",
(_message.Message,),
{
"DESCRIPTOR": _TENSEALVECTOR,
"__module__": "proto.lib.tenseal.vector_pb2"
# @@protoc_insertion_point(class_scope:syft.lib.tenseal.TenSEALVector)
},
)
_sym_db.RegisterMessage(TenSEALVector)
# @@protoc_insertion_point(module_scope)
|
apache-2.0
|
wrigri/compute-image-packages
|
packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_manager.py
|
6
|
4004
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage the retrieval and excution of metadata scripts."""
import contextlib
import logging.handlers
import optparse
import shutil
import tempfile
from google_compute_engine import config_manager
from google_compute_engine import logger
from google_compute_engine.metadata_scripts import script_executor
from google_compute_engine.metadata_scripts import script_retriever
@contextlib.contextmanager
def _CreateTempDir(prefix, run_dir=None):
"""Context manager for creating a temporary directory.
Args:
prefix: string, the prefix for the temporary directory.
run_dir: string, the base directory location of the temporary directory.
Yields:
string, the temporary directory created.
"""
temp_dir = tempfile.mkdtemp(prefix=prefix + '-', dir=run_dir)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
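# A minimal usage sketch for the context manager above; the 'startup' prefix
# is illustrative:
#
#   with _CreateTempDir('startup') as dest_dir:
#       ...  # write retrieved scripts into dest_dir before it is removed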
class ScriptManager(object):
"""A class for retrieving and executing metadata scripts."""
def __init__(
self, script_type, default_shell=None, run_dir=None, debug=False):
"""Constructor.
Args:
script_type: string, the metadata script type to run.
default_shell: string, the default shell to execute the script.
run_dir: string, the base directory location of the temporary directory.
debug: bool, True if debug output should write to the console.
"""
self.script_type = script_type
self.default_shell = default_shell
name = '%s-script' % self.script_type
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(name=name, debug=debug, facility=facility)
self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)
self.executor = script_executor.ScriptExecutor(
self.logger, script_type, default_shell=default_shell)
self._RunScripts(run_dir=run_dir)
def _RunScripts(self, run_dir=None):
"""Retrieve metadata scripts and execute them.
Args:
run_dir: string, the base directory location of the temporary directory.
"""
with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:
try:
self.logger.info('Starting %s scripts.', self.script_type)
script_dict = self.retriever.GetScripts(dest_dir)
self.executor.RunScripts(script_dict)
finally:
self.logger.info('Finished running %s scripts.', self.script_type)
def main():
script_types = ('startup', 'shutdown')
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug', action='store_true', dest='debug',
help='print debug output to the console.')
parser.add_option(
'--script-type', dest='script_type', help='metadata script type.')
(options, _) = parser.parse_args()
if options.script_type and options.script_type.lower() in script_types:
script_type = options.script_type.lower()
else:
valid_args = ', '.join(script_types)
message = 'No valid argument specified. Options: [%s].' % valid_args
raise ValueError(message)
instance_config = config_manager.ConfigManager()
if instance_config.GetOptionBool('MetadataScripts', script_type):
ScriptManager(
script_type,
default_shell=instance_config.GetOptionString(
'MetadataScripts', 'default_shell'),
run_dir=instance_config.GetOptionString('MetadataScripts', 'run_dir'),
debug=bool(options.debug))
if __name__ == '__main__':
main()
|
apache-2.0
|
FATruden/boto
|
tests/integration/gs/util.py
|
9
|
3215
|
# Copyright (c) 2012, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import time
from boto.provider import Provider
_HAS_GOOGLE_CREDENTIALS = None
def has_google_credentials():
global _HAS_GOOGLE_CREDENTIALS
if _HAS_GOOGLE_CREDENTIALS is None:
provider = Provider('google')
if provider.access_key is None or provider.secret_key is None:
_HAS_GOOGLE_CREDENTIALS = False
else:
_HAS_GOOGLE_CREDENTIALS = True
return _HAS_GOOGLE_CREDENTIALS
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2, logger=None):
"""Retry calling the decorated function using an exponential backoff.
Taken from:
https://github.com/saltycrane/retry-decorator
Licensed under BSD:
https://github.com/saltycrane/retry-decorator/blob/master/LICENSE
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
try_one_last_time = True
while mtries > 1:
try:
                    return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
if try_one_last_time:
return f(*args, **kwargs)
return
return f_retry # true decorator
return deco_retry
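# A minimal sketch of applying the decorator above; _flaky and its failure
# pattern are hypothetical (fails twice, then succeeds on the third try).
def _example_retry_usage():
    state = {'calls': 0}
    @retry(ValueError, tries=4, delay=0)
    def _flaky():
        state['calls'] += 1
        if state['calls'] < 3:
            raise ValueError('transient failure %d' % state['calls'])
        return 'ok'
    return _flaky()  # returns 'ok' after two retried ValueErrors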
|
mit
|
Thraxis/pymedusa
|
lib/unidecode/x0c6.py
|
253
|
4490
|
data = (
'yeoss', # 0x00
'yeong', # 0x01
'yeoj', # 0x02
'yeoc', # 0x03
'yeok', # 0x04
'yeot', # 0x05
'yeop', # 0x06
'yeoh', # 0x07
'ye', # 0x08
'yeg', # 0x09
'yegg', # 0x0a
'yegs', # 0x0b
'yen', # 0x0c
'yenj', # 0x0d
'yenh', # 0x0e
'yed', # 0x0f
'yel', # 0x10
'yelg', # 0x11
'yelm', # 0x12
'yelb', # 0x13
'yels', # 0x14
'yelt', # 0x15
'yelp', # 0x16
'yelh', # 0x17
'yem', # 0x18
'yeb', # 0x19
'yebs', # 0x1a
'yes', # 0x1b
'yess', # 0x1c
'yeng', # 0x1d
'yej', # 0x1e
'yec', # 0x1f
'yek', # 0x20
'yet', # 0x21
'yep', # 0x22
'yeh', # 0x23
'o', # 0x24
'og', # 0x25
'ogg', # 0x26
'ogs', # 0x27
'on', # 0x28
'onj', # 0x29
'onh', # 0x2a
'od', # 0x2b
'ol', # 0x2c
'olg', # 0x2d
'olm', # 0x2e
'olb', # 0x2f
'ols', # 0x30
'olt', # 0x31
'olp', # 0x32
'olh', # 0x33
'om', # 0x34
'ob', # 0x35
'obs', # 0x36
'os', # 0x37
'oss', # 0x38
'ong', # 0x39
'oj', # 0x3a
'oc', # 0x3b
'ok', # 0x3c
'ot', # 0x3d
'op', # 0x3e
'oh', # 0x3f
'wa', # 0x40
'wag', # 0x41
'wagg', # 0x42
'wags', # 0x43
'wan', # 0x44
'wanj', # 0x45
'wanh', # 0x46
'wad', # 0x47
'wal', # 0x48
'walg', # 0x49
'walm', # 0x4a
'walb', # 0x4b
'wals', # 0x4c
'walt', # 0x4d
'walp', # 0x4e
'walh', # 0x4f
'wam', # 0x50
'wab', # 0x51
'wabs', # 0x52
'was', # 0x53
'wass', # 0x54
'wang', # 0x55
'waj', # 0x56
'wac', # 0x57
'wak', # 0x58
'wat', # 0x59
'wap', # 0x5a
'wah', # 0x5b
'wae', # 0x5c
'waeg', # 0x5d
'waegg', # 0x5e
'waegs', # 0x5f
'waen', # 0x60
'waenj', # 0x61
'waenh', # 0x62
'waed', # 0x63
'wael', # 0x64
'waelg', # 0x65
'waelm', # 0x66
'waelb', # 0x67
'waels', # 0x68
'waelt', # 0x69
'waelp', # 0x6a
'waelh', # 0x6b
'waem', # 0x6c
'waeb', # 0x6d
'waebs', # 0x6e
'waes', # 0x6f
'waess', # 0x70
'waeng', # 0x71
'waej', # 0x72
'waec', # 0x73
'waek', # 0x74
'waet', # 0x75
'waep', # 0x76
'waeh', # 0x77
'oe', # 0x78
'oeg', # 0x79
'oegg', # 0x7a
'oegs', # 0x7b
'oen', # 0x7c
'oenj', # 0x7d
'oenh', # 0x7e
'oed', # 0x7f
'oel', # 0x80
'oelg', # 0x81
'oelm', # 0x82
'oelb', # 0x83
'oels', # 0x84
'oelt', # 0x85
'oelp', # 0x86
'oelh', # 0x87
'oem', # 0x88
'oeb', # 0x89
'oebs', # 0x8a
'oes', # 0x8b
'oess', # 0x8c
'oeng', # 0x8d
'oej', # 0x8e
'oec', # 0x8f
'oek', # 0x90
'oet', # 0x91
'oep', # 0x92
'oeh', # 0x93
'yo', # 0x94
'yog', # 0x95
'yogg', # 0x96
'yogs', # 0x97
'yon', # 0x98
'yonj', # 0x99
'yonh', # 0x9a
'yod', # 0x9b
'yol', # 0x9c
'yolg', # 0x9d
'yolm', # 0x9e
'yolb', # 0x9f
'yols', # 0xa0
'yolt', # 0xa1
'yolp', # 0xa2
'yolh', # 0xa3
'yom', # 0xa4
'yob', # 0xa5
'yobs', # 0xa6
'yos', # 0xa7
'yoss', # 0xa8
'yong', # 0xa9
'yoj', # 0xaa
'yoc', # 0xab
'yok', # 0xac
'yot', # 0xad
'yop', # 0xae
'yoh', # 0xaf
'u', # 0xb0
'ug', # 0xb1
'ugg', # 0xb2
'ugs', # 0xb3
'un', # 0xb4
'unj', # 0xb5
'unh', # 0xb6
'ud', # 0xb7
'ul', # 0xb8
'ulg', # 0xb9
'ulm', # 0xba
'ulb', # 0xbb
'uls', # 0xbc
'ult', # 0xbd
'ulp', # 0xbe
'ulh', # 0xbf
'um', # 0xc0
'ub', # 0xc1
'ubs', # 0xc2
'us', # 0xc3
'uss', # 0xc4
'ung', # 0xc5
'uj', # 0xc6
'uc', # 0xc7
'uk', # 0xc8
'ut', # 0xc9
'up', # 0xca
'uh', # 0xcb
'weo', # 0xcc
'weog', # 0xcd
'weogg', # 0xce
'weogs', # 0xcf
'weon', # 0xd0
'weonj', # 0xd1
'weonh', # 0xd2
'weod', # 0xd3
'weol', # 0xd4
'weolg', # 0xd5
'weolm', # 0xd6
'weolb', # 0xd7
'weols', # 0xd8
'weolt', # 0xd9
'weolp', # 0xda
'weolh', # 0xdb
'weom', # 0xdc
'weob', # 0xdd
'weobs', # 0xde
'weos', # 0xdf
'weoss', # 0xe0
'weong', # 0xe1
'weoj', # 0xe2
'weoc', # 0xe3
'weok', # 0xe4
'weot', # 0xe5
'weop', # 0xe6
'weoh', # 0xe7
'we', # 0xe8
'weg', # 0xe9
'wegg', # 0xea
'wegs', # 0xeb
'wen', # 0xec
'wenj', # 0xed
'wenh', # 0xee
'wed', # 0xef
'wel', # 0xf0
'welg', # 0xf1
'welm', # 0xf2
'welb', # 0xf3
'wels', # 0xf4
'welt', # 0xf5
'welp', # 0xf6
'welh', # 0xf7
'wem', # 0xf8
'web', # 0xf9
'webs', # 0xfa
'wes', # 0xfb
'wess', # 0xfc
'weng', # 0xfd
'wej', # 0xfe
'wec', # 0xff
)
|
gpl-3.0
|
h-hirokawa/swampdragon
|
setup.py
|
1
|
1269
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="SwampDragon",
version="0.3.8.2",
author="Jonas Hagstedt",
author_email="[email protected]",
description=("SwampDragon is a powerful platform making it easy to build real time web applications, combining the power of Django and Tornado"),
license="BSD",
keywords="SwampDragon, websockets, realtime, sockjs, django, tornado, framework",
url="http://swampdragon.net",
packages=find_packages(),
long_description=read('README.txt'),
include_package_data=True,
entry_points={'console_scripts': ['dragon-admin = swampdragon.core:run', ]},
install_requires=[
"Django >= 1.6, < 1.8",
"Tornado >= 3.2.2",
"sockjs-tornado >= 1.0.0",
"tornado-redis >= 2.4.18",
"redis >= 2.8",
"python-dateutil >= 2.2"
],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
)
|
bsd-3-clause
|
apple/swift
|
benchmark/scripts/perf_test_driver/perf_test_driver.py
|
12
|
5953
|
#!/usr/bin/env python
# ===--- perf_test_driver.py ---------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
from __future__ import print_function
import functools
import glob
import multiprocessing
import os
import platform
import re
import subprocess
BENCHMARK_OUTPUT_RE = re.compile(r"\d+,([^,]+)")
class Result(object):
def __init__(self, name, status, output, xfail_list):
self.name = name
self.status = status
self.output = output
self.is_xfailed = any((re.match(x, self.name) is not None for x in xfail_list))
def is_failure(self):
return self.get_result() in ["FAIL", "XPASS"]
def get_result(self):
if self.is_xfailed:
if self.status:
return "XFAIL"
return "XPASS"
if self.status:
return "FAIL"
return "PASS"
def get_name(self):
return self.name
def merge_in_extra_data(self, d):
"""Rather than modifying the extra data dict, return it as a no-op"""
return d
def print_data(self, max_test_len):
fmt = "{:<%d}{:}" % (max_test_len + 5)
print(fmt.format(self.get_name(), self.get_result()))
def run_with_timeout(func, args):
# We timeout after 10 minutes.
timeout_seconds = 10 * 60
# We just use this to create a timeout since we use an older python. Once
# we update to use python >= 3.3, use the timeout API on communicate
# instead.
import multiprocessing.dummy
fakeThreadPool = multiprocessing.dummy.Pool(1)
try:
result = fakeThreadPool.apply_async(func, args=args)
return result.get(timeout_seconds)
except multiprocessing.TimeoutError:
fakeThreadPool.terminate()
raise RuntimeError(
"Child process aborted due to timeout. "
"Timeout: %s seconds" % timeout_seconds
)
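# A hedged usage sketch: args must be a tuple of positional arguments for
# func, so e.g. summing a list under the 10-minute cap looks like:
#
#   total = run_with_timeout(sum, ([1, 2, 3],))  # -> 6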
def _unwrap_self(args):
return type(args[0]).process_input(*args)
def get_benchmark_executable(binary_dir, opt_level):
suffix = opt_level + "-" + platform.machine() + "*"
pattern = os.path.join(binary_dir, "Benchmark_" + suffix)
executables = glob.glob(pattern)
if len(executables) == 0:
raise ValueError(
"No benchmark executable for file name pattern " +
pattern + " found")
if len(executables) > 1:
raise ValueError(
"Multiple benchmark executables for file name pattern " +
pattern + " found\n" +
str(executables))
return executables[0]
BenchmarkDriver_OptLevels = ["Onone", "O", "Osize"]
class BenchmarkDriver(object):
def __init__(
self,
binary_dir,
xfail_list,
enable_parallel=False,
opt_levels=BenchmarkDriver_OptLevels,
):
self.targets = [
(get_benchmark_executable(binary_dir, o), o) for o in opt_levels
]
self.xfail_list = xfail_list
self.enable_parallel = enable_parallel
self.data = None
def print_data_header(self, max_test_len):
fmt = "{:<%d}{:}" % (max_test_len + 5)
print(fmt.format("Name", "Result"))
def prepare_input(self, name, opt_level):
raise RuntimeError("Abstract method")
def process_input(self, data):
raise RuntimeError("Abstract method")
def run_for_opt_level(self, binary, opt_level, test_filter):
print("testing driver at path: %s" % binary)
names = []
output = subprocess.check_output([binary, "--list"], universal_newlines=True)
for line in output.split("\n")[1:]:
m = BENCHMARK_OUTPUT_RE.match(line)
if m is None:
continue
names.append(m.group(1))
if test_filter:
regex = re.compile(test_filter)
names = [n for n in names if regex.match(n)]
def prepare_input_wrapper(name):
x = {"opt": opt_level, "path": binary, "test_name": name}
x.update(self.prepare_input(name))
return x
prepared_input = [prepare_input_wrapper(n) for n in names]
results = None
if self.enable_parallel:
p = multiprocessing.Pool()
z = zip([self] * len(prepared_input), prepared_input)
results = p.map_async(_unwrap_self, z).get(999999)
else:
results = map(self.process_input, prepared_input)
def reduce_results(acc, r):
acc["result"].append(r)
acc["has_failure"] = acc["has_failure"] or r.is_failure()
acc["max_test_len"] = max(acc["max_test_len"], len(r.get_name()))
acc["extra_data"] = r.merge_in_extra_data(acc["extra_data"])
return acc
return functools.reduce(
reduce_results,
results,
{"result": [], "has_failure": False, "max_test_len": 0, "extra_data": {}},
)
def print_data(self, data, max_test_len):
print("Results:")
self.print_data_header(max_test_len)
for d in data:
for r in d["result"]:
r.print_data(max_test_len)
def run(self, test_filter=None):
self.data = [
self.run_for_opt_level(binary, opt_level, test_filter)
for binary, opt_level in self.targets
]
max_test_len = functools.reduce(max, [d["max_test_len"] for d in self.data])
has_failure = functools.reduce(max, [d["has_failure"] for d in self.data])
self.print_data(self.data, max_test_len)
return not has_failure
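# A minimal sketch (the subclass name and its no-op behavior are hypothetical)
# of filling in the two abstract hooks above; opt_level defaults to None since
# run_for_opt_level invokes prepare_input with the test name only.
class NullBenchmarkDriver(BenchmarkDriver):
    def prepare_input(self, name, opt_level=None):
        return {}  # extra per-test data merged into the process_input dict

    def process_input(self, data):
        # Report every test as passing without running anything.
        return Result(data['test_name'], 0, '', self.xfail_list)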
|
apache-2.0
|
t-artistik/browserscope
|
categories/richtext2/handlers.py
|
16
|
4007
|
#!/usr/bin/python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for New Rich Text Tests"""
__author__ = '[email protected] (Roland Steiner)'
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
import django
from django import http
from django import shortcuts
from django.template import add_to_builtins
add_to_builtins('base.custom_filters')
# Shared stuff
from categories import all_test_sets
from base import decorators
from base import util
# common to the RichText2 suite
from categories.richtext2 import common
# tests
from categories.richtext2.tests.apply import APPLY_TESTS
from categories.richtext2.tests.applyCSS import APPLY_TESTS_CSS
from categories.richtext2.tests.change import CHANGE_TESTS
from categories.richtext2.tests.changeCSS import CHANGE_TESTS_CSS
from categories.richtext2.tests.delete import DELETE_TESTS
from categories.richtext2.tests.forwarddelete import FORWARDDELETE_TESTS
from categories.richtext2.tests.insert import INSERT_TESTS
from categories.richtext2.tests.selection import SELECTION_TESTS
from categories.richtext2.tests.unapply import UNAPPLY_TESTS
from categories.richtext2.tests.unapplyCSS import UNAPPLY_TESTS_CSS
from categories.richtext2.tests.querySupported import QUERYSUPPORTED_TESTS
from categories.richtext2.tests.queryEnabled import QUERYENABLED_TESTS
from categories.richtext2.tests.queryIndeterm import QUERYINDETERM_TESTS
from categories.richtext2.tests.queryState import QUERYSTATE_TESTS, QUERYSTATE_TESTS_CSS
from categories.richtext2.tests.queryValue import QUERYVALUE_TESTS, QUERYVALUE_TESTS_CSS
def About(request):
"""About page."""
overview = """These tests cover browers' implementations of
<a href="http://blog.whatwg.org/the-road-to-html-5-contenteditable">contenteditable</a>
for basic rich text formatting commands. Most browser implementations do very
well at editing the HTML which is generated by their own execCommands. But a
big problem happens when developers try to make cross-browser web
applications using contenteditable - most browsers are not able to correctly
change formatting generated by other browsers. On top of that, most browsers
allow users to paste arbitrary HTML from other webpages into a
contenteditable region, which is even harder for browsers to properly
format. These tests check how well the execCommand, queryCommandState,
and queryCommandValue functions work with different types of HTML."""
return util.About(request, common.CATEGORY, category_title='Rich Text',
overview=overview, show_hidden=False)
def RunRichText2Tests(request):
params = {
'classes': common.CLASSES,
'commonIDPrefix': common.TEST_ID_PREFIX,
'strict': False,
'suites': [
SELECTION_TESTS,
APPLY_TESTS,
APPLY_TESTS_CSS,
CHANGE_TESTS,
CHANGE_TESTS_CSS,
UNAPPLY_TESTS,
UNAPPLY_TESTS_CSS,
DELETE_TESTS,
FORWARDDELETE_TESTS,
INSERT_TESTS,
QUERYSUPPORTED_TESTS,
QUERYENABLED_TESTS,
QUERYINDETERM_TESTS,
QUERYSTATE_TESTS,
QUERYSTATE_TESTS_CSS,
QUERYVALUE_TESTS,
QUERYVALUE_TESTS_CSS
]
}
return shortcuts.render_to_response('%s/templates/richtext2.html' % common.CATEGORY, params)
|
apache-2.0
|
nju520/django
|
django/conf/locale/pt/formats.py
|
504
|
1717
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
# '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
# '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
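# Hedged example of how these input formats parse (standard strptime
# semantics): datetime.datetime.strptime('25/10/2006', DATE_INPUT_FORMATS[1])
# -> datetime.datetime(2006, 10, 25, 0, 0)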
|
bsd-3-clause
|
pquerna/cloud-init-debian-pkg-dead
|
cloudinit/config/cc_seed_random.py
|
6
|
3161
|
# vi: ts=4 expandtab
#
# Copyright (C) 2013 Yahoo! Inc.
# Copyright (C) 2014 Canonical, Ltd
#
# Author: Joshua Harlow <[email protected]>
# Author: Dustin Kirkland <[email protected]>
# Author: Scott Moser <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import os
from StringIO import StringIO
from cloudinit.settings import PER_INSTANCE
from cloudinit import log as logging
from cloudinit import util
frequency = PER_INSTANCE
LOG = logging.getLogger(__name__)
def _decode(data, encoding=None):
if not data:
return ''
if not encoding or encoding.lower() in ['raw']:
return data
elif encoding.lower() in ['base64', 'b64']:
return base64.b64decode(data)
elif encoding.lower() in ['gzip', 'gz']:
return util.decomp_gzip(data, quiet=False)
else:
raise IOError("Unknown random_seed encoding: %s" % (encoding))
def handle_random_seed_command(command, required, env=None):
if not command and required:
raise ValueError("no command found but required=true")
elif not command:
LOG.debug("no command provided")
return
cmd = command[0]
if not util.which(cmd):
if required:
raise ValueError("command '%s' not found but required=true", cmd)
else:
LOG.debug("command '%s' not found for seed_command", cmd)
return
util.subp(command, env=env, capture=False)
def handle(name, cfg, cloud, log, _args):
mycfg = cfg.get('random_seed', {})
seed_path = mycfg.get('file', '/dev/urandom')
seed_data = mycfg.get('data', '')
seed_buf = StringIO()
if seed_data:
seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
# 'random_seed' is set up by Azure datasource, and comes already in
# openstack meta_data.json
metadata = cloud.datasource.metadata
if metadata and 'random_seed' in metadata:
seed_buf.write(metadata['random_seed'])
seed_data = seed_buf.getvalue()
if len(seed_data):
log.debug("%s: adding %s bytes of random seed entropy to %s", name,
len(seed_data), seed_path)
util.append_file(seed_path, seed_data)
command = mycfg.get('command', ['pollinate', '-q'])
req = mycfg.get('command_required', False)
try:
env = os.environ.copy()
env['RANDOM_SEED_FILE'] = seed_path
handle_random_seed_command(command=command, required=req, env=env)
except ValueError as e:
log.warn("handling random command [%s] failed: %s", command, e)
raise e
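# Hedged, illustrative examples of _decode above:
# _decode('aGVsbG8=', encoding='b64') -> 'hello'
# _decode('hello', encoding='raw') -> 'hello'  (raw/None pass data through)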
|
gpl-3.0
|
vmora/QGIS
|
python/plugins/processing/algs/grass7/ext/v_net_path.py
|
12
|
1468
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
v_net_path.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .v_net import incorporatePoints, variableOutput
def processCommand(alg, parameters, context, feedback):
incorporatePoints(alg, parameters, context, feedback)
def processOutputs(alg, parameters, context, feedback):
outputParameter = {'output': ['output', 'line', 1, False]}
variableOutput(alg, outputParameter, parameters, context)
|
gpl-2.0
|
vasyarv/edx-platform
|
common/djangoapps/performance/views/__init__.py
|
100
|
1765
|
import datetime
import json
import logging
from django.http import HttpResponse
from track.utils import DateTimeJSONEncoder
perflog = logging.getLogger("perflog")
def _get_request_header(request, header_name, default=''):
"""Helper method to get header values from a request's META dict, if present."""
if request is not None and hasattr(request, 'META') and header_name in request.META:
return request.META[header_name]
else:
return default
def _get_request_value(request, value_name, default=''):
"""Helper method to get header values from a request's REQUEST dict, if present."""
if request is not None and hasattr(request, 'REQUEST') and value_name in request.REQUEST:
return request.REQUEST[value_name]
else:
return default
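# Hedged usage sketch for the helpers above:
#   _get_request_header(request, 'HTTP_USER_AGENT')          # header or ''
#   _get_request_value(request, 'page', default='unknown')   # value or default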
def performance_log(request):
"""
    Log when a POST call to the "performance" URL is made by a user.
Request should provide "event" and "page" arguments.
"""
event = {
"ip": _get_request_header(request, 'REMOTE_ADDR'),
"referer": _get_request_header(request, 'HTTP_REFERER'),
"accept_language": _get_request_header(request, 'HTTP_ACCEPT_LANGUAGE'),
"event_source": "browser",
"event": _get_request_value(request, 'event'),
"agent": _get_request_header(request, 'HTTP_USER_AGENT'),
"page": _get_request_value(request, 'page'),
"id": _get_request_value(request, 'id'),
"expgroup": _get_request_value(request, 'expgroup'),
"value": _get_request_value(request, 'value'),
"time": datetime.datetime.utcnow(),
"host": _get_request_header(request, 'SERVER_NAME'),
}
perflog.info(json.dumps(event, cls=DateTimeJSONEncoder))
return HttpResponse(status=204)
|
agpl-3.0
|
victorlin/ez2pay
|
ez2pay/models/user.py
|
1
|
6784
|
from __future__ import unicode_literals
import os
import hmac
import hashlib
from . import tables
from .base import BaseTableModel
class AuthError(RuntimeError):
"""Authentication error
"""
class BadPassword(AuthError):
"""Raised when user tries to authenticate with wrong password
"""
class UserNotExist(AuthError):
"""Raised when user tries to authenticate with a non-exist user
"""
class UserModel(BaseTableModel):
"""User data model
"""
TABLE = tables.User
def get_by_name(self, user_name):
"""Get a user by name
"""
user = (
self.session
.query(tables.User)
.filter_by(user_name=user_name)
.first()
)
return user
def get_by_email(self, email):
"""Get a user by email
"""
user = (
self.session
.query(tables.User)
.filter_by(email=email)
.first()
)
return user
def create(
self,
user_name,
display_name,
password,
email,
verified=False,
):
"""Create a new user and return verification
"""
user_name = user_name.lower()
email = email.lower()
salt_hashedpassword = ''.join(self.get_salt_hashedpassword(password))
# create user
user = tables.User(
user_name=unicode(user_name),
email=unicode(email),
display_name=unicode(display_name),
password=salt_hashedpassword,
created=tables.now_func(),
verified=verified,
)
self.session.add(user)
# flush the change, so we can get real user id
self.session.flush()
assert user.user_id is not None, 'User id should not be none here'
user_id = user.user_id
self.logger.info('Create user %s', user_name)
return user_id
def get_salt_hashedpassword(self, password):
"""Generate salt and hashed password,
salt is a 160bits random string, this is meant to protect the hashed
password from query table attack
hashedpassword is SHA1(password, salt)
return value is (hexdigest of salt, hexdigest of hashedpassword)
"""
if isinstance(password, unicode):
password_utf8 = password.encode('utf8')
else:
password_utf8 = password
# generate salt
salt = hashlib.sha1()
        # NOTE: os.urandom reads /dev/urandom under Linux, which does not
        # block; the blocking (and potential denial-of-service) concern
        # applies to /dev/random, not to this call.
salt.update(os.urandom(16))
# generate hashed password
hashedpassword = hashlib.sha1()
hashedpassword.update(password_utf8 + salt.hexdigest())
return salt.hexdigest(), hashedpassword.hexdigest()
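    # Layout sketch of the stored value: both hex digests are 40 characters,
    # which is why validate_password below splits user.password at index 40:
    #   user.password[:40]  -> salt
    #   user.password[40:]  -> SHA1(password + salt)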
def validate_password(self, user_id, password):
"""Validate password of a user
"""
user = self.get(user_id)
if user is None:
raise UserNotExist
salt_hashedpassword = user.password
salt = salt_hashedpassword[:40]
hashedpassword = salt_hashedpassword[40:]
input_hashedpassword = hashlib.sha1(password + salt).hexdigest()
return hashedpassword == input_hashedpassword
def authenticate_user(self, name_or_email, password):
"""Authenticate user by user_name of email and password. If the user
pass the authentication, return user_id, otherwise, raise error
"""
from sqlalchemy.sql.expression import or_
User = tables.User
user = (
self.session
.query(User)
.filter(or_(User.user_name == name_or_email,
User.email == name_or_email))
.first()
)
if user is None:
            # Maybe it's a casing problem: although we enforce lower case for
            # user names and emails now, some older accounts may still have
            # identifiers in mixed case, so we run the query a second time
name_or_email = name_or_email.lower()
user = (
self.session
.query(User)
.filter(or_(User.user_name == name_or_email,
User.email == name_or_email))
.first()
)
if user is None:
raise UserNotExist('User %s does not exist' % name_or_email)
if not self.validate_password(user.user_id, password):
raise BadPassword('Bad password')
return user.user_id
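    # Minimal usage sketch (hypothetical model instance and credentials):
    #   try:
    #       user_id = user_model.authenticate_user('[email protected]', 's3cret')
    #   except (UserNotExist, BadPassword):
    #       ...  # reject the login attempt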
def update_password(self, user_id, password):
"""Update password of an user
"""
user = self.get(user_id, raise_error=True)
if user is None:
raise KeyError
salt_hashedpassword = ''.join(self.get_salt_hashedpassword(password))
user.password = salt_hashedpassword
self.session.add(user)
def update_user(self, user_id, **kwargs):
"""Update attributes of a user
"""
user = self.get(user_id, raise_error=True)
if 'display_name' in kwargs:
user.display_name = kwargs['display_name']
if 'email' in kwargs:
user.email = kwargs['email']
if 'verified' in kwargs:
user.verified = kwargs['verified']
self.session.add(user)
def update_groups(self, user_id, group_ids):
"""Update groups of this user
"""
user = self.get(user_id, raise_error=True)
new_groups = (
self.session
.query(tables.Group)
.filter(tables.Group.group_id.in_(group_ids))
)
user.groups = new_groups.all()
self.session.flush()
def get_recovery_code(self, key, user_id):
"""Get current recovery code of a user
"""
user = self.get(user_id, raise_error=True)
h = hmac.new(key)
h.update('%s%s%s%s' % (user_id, user.user_name, user.email, user.password))
return h.hexdigest()
def get_verification_code(self, user_id, verify_type, secret):
"""Get a verification code of user
"""
user = self.get(user_id, raise_error=True)
code_hash = hmac.new(secret)
code_hash.update(str(user_id))
code_hash.update(str(user.user_name))
code_hash.update(str(verify_type))
return code_hash.hexdigest()
|
mit
|
edxnercel/edx-platform
|
lms/djangoapps/courseware/grades.py
|
14
|
23832
|
# Compute grades using real division, with no integer truncation
from __future__ import division
from collections import defaultdict
import json
import random
import logging
from contextlib import contextmanager
from django.conf import settings
from django.db import transaction
from django.test.client import RequestFactory
import dogstats_wrapper as dog_stats_api
from courseware import courses
from courseware.model_data import FieldDataCache
from student.models import anonymous_id_for_user
from util.module_utils import yield_dynamic_descriptor_descendants
from xmodule import graders
from xmodule.graders import Score
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .models import StudentModule
from .module_render import get_module_for_descriptor
from submissions import api as sub_api # installed from the edx-submissions repository
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.signals.signals import GRADES_UPDATED
log = logging.getLogger("edx.courseware")
def answer_distributions(course_key):
"""
Given a course_key, return answer distributions in the form of a dictionary
mapping:
(problem url_name, problem display_name, problem_id) -> {dict: answer -> count}
Answer distributions are found by iterating through all StudentModule
entries for a given course with type="problem" and a grade that is not null.
This means that we only count LoncapaProblems that people have submitted.
Other types of items like ORA or sequences will not be collected. Empty
    Loncapa problem state that gets created from running the progress page is
also not counted.
This method accesses the StudentModule table directly instead of using the
CapaModule abstraction. The main reason for this is so that we can generate
the report without any side-effects -- we don't have to worry about answer
distribution potentially causing re-evaluation of the student answer. This
also allows us to use the read-replica database, which reduces risk of bad
locking behavior. And quite frankly, it makes this a lot less confusing.
Also, we're pulling all available records from the database for this course
rather than crawling through a student's course-tree -- the latter could
potentially cause us trouble with A/B testing. The distribution report may
not be aware of problems that are not visible to the user being used to
generate the report.
This method will try to use a read-replica database if one is available.
"""
# dict: { module.module_state_key : (url_name, display_name) }
state_keys_to_problem_info = {} # For caching, used by url_and_display_name
def url_and_display_name(usage_key):
"""
For a given usage_key, return the problem's url and display_name.
Handle modulestore access and caching. This method ignores permissions.
Raises:
InvalidKeyError: if the usage_key does not parse
ItemNotFoundError: if there is no content that corresponds
to this usage_key.
"""
problem_store = modulestore()
if usage_key not in state_keys_to_problem_info:
problem = problem_store.get_item(usage_key)
problem_info = (problem.url_name, problem.display_name_with_default)
state_keys_to_problem_info[usage_key] = problem_info
return state_keys_to_problem_info[usage_key]
# Iterate through all problems submitted for this course in no particular
# order, and build up our answer_counts dict that we will eventually return
answer_counts = defaultdict(lambda: defaultdict(int))
for module in StudentModule.all_submitted_problems_read_only(course_key):
try:
state_dict = json.loads(module.state) if module.state else {}
raw_answers = state_dict.get("student_answers", {})
except ValueError:
log.error(
u"Answer Distribution: Could not parse module state for StudentModule id=%s, course=%s",
module.id,
course_key,
)
continue
try:
url, display_name = url_and_display_name(module.module_state_key.map_into_course(course_key))
# Each problem part has an ID that is derived from the
# module.module_state_key (with some suffix appended)
for problem_part_id, raw_answer in raw_answers.items():
# Convert whatever raw answers we have (numbers, unicode, None, etc.)
# to be unicode values. Note that if we get a string, it's always
# unicode and not str -- state comes from the json decoder, and that
# always returns unicode for strings.
answer = unicode(raw_answer)
answer_counts[(url, display_name, problem_part_id)][answer] += 1
except (ItemNotFoundError, InvalidKeyError):
msg = "Answer Distribution: Item {} referenced in StudentModule {} " + \
"for user {} in course {} not found; " + \
"This can happen if a student answered a question that " + \
"was later deleted from the course. This answer will be " + \
"omitted from the answer distribution CSV."
log.warning(
msg.format(module.module_state_key, module.id, module.student_id, course_key)
)
continue
return answer_counts
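# Hedged sketch of the returned mapping (hypothetical values):
#   {('problem_url_name', 'Problem Display Name', 'part_id_2_1'):
#        {u'42': 7, u'blue': 3}}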
@transaction.commit_manually
def grade(student, request, course, keep_raw_scores=False):
"""
Wraps "_grade" with the manual_transaction context manager just in case
there are unanticipated errors.
Send a signal to update the minimum grade requirement status.
"""
with manual_transaction():
grade_summary = _grade(student, request, course, keep_raw_scores)
responses = GRADES_UPDATED.send_robust(
sender=None,
username=request.user.username,
grade_summary=grade_summary,
course_key=course.id,
deadline=course.end
)
for receiver, response in responses:
log.info('Signal fired when student grade is calculated. Receiver: %s. Response: %s', receiver, response)
return grade_summary
def _grade(student, request, course, keep_raw_scores):
"""
Unwrapped version of "grade"
This grades a student as quickly as possible. It returns the
output from the course grader, augmented with the final letter
grade. The keys in the output are:
    - course : a CourseDescriptor
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
- keep_raw_scores : if True, then value for key 'raw_scores' contains scores
for every graded module
More information on the format is in the docstring for CourseGrader.
"""
grading_context = course.grading_context
raw_scores = []
# Dict of item_ids -> (earned, possible) point tuples. This *only* grabs
# scores that were registered with the submissions API, which for the moment
# means only openassessment (edx-ora2)
submissions_scores = sub_api.get_scores(
course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)
)
totaled_scores = {}
# This next complicated loop is just to collect the totaled_scores, which is
# passed to the grader
for section_format, sections in grading_context['graded_sections'].iteritems():
format_scores = []
for section in sections:
section_descriptor = section['section_descriptor']
section_name = section_descriptor.display_name_with_default
# some problems have state that is updated independently of interaction
            # with the LMS, so they need to always be scored. (E.g. foldit,
# combinedopenended)
should_grade_section = any(
descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']
)
# If there are no problems that always have to be regraded, check to
# see if any of our locations are in the scores from the submissions
# API. If scores exist, we have to calculate grades for this section.
if not should_grade_section:
should_grade_section = any(
descriptor.location.to_deprecated_string() in submissions_scores
for descriptor in section['xmoduledescriptors']
)
if not should_grade_section:
with manual_transaction():
should_grade_section = StudentModule.objects.filter(
student=student,
module_state_key__in=[
descriptor.location for descriptor in section['xmoduledescriptors']
]
).exists()
# If we haven't seen a single problem in the section, we don't have
# to grade it at all! We can assume 0%
if should_grade_section:
scores = []
def create_module(descriptor):
'''creates an XModule instance given a descriptor'''
# TODO: We need the request to pass into here. If we could forego that, our arguments
# would be simpler
with manual_transaction():
field_data_cache = FieldDataCache([descriptor], course.id, student)
return get_module_for_descriptor(
student, request, descriptor, field_data_cache, course.id, course=course
)
for module_descriptor in yield_dynamic_descriptor_descendants(
section_descriptor, student.id, create_module
):
(correct, total) = get_score(
course.id, student, module_descriptor, create_module, scores_cache=submissions_scores
)
if correct is None and total is None:
continue
if settings.GENERATE_PROFILE_SCORES: # for debugging!
if total > 1:
correct = random.randrange(max(total - 2, 1), total + 1)
else:
correct = total
graded = module_descriptor.graded
if not total > 0:
# We simply cannot grade a problem that is 12/0, because we might need it as a percentage
graded = False
scores.append(
Score(
correct,
total,
graded,
module_descriptor.display_name_with_default,
module_descriptor.location
)
)
_, graded_total = graders.aggregate_scores(scores, section_name)
if keep_raw_scores:
raw_scores += scores
else:
graded_total = Score(0.0, 1.0, True, section_name, None)
            # Add the graded total to totaled_scores
if graded_total.possible > 0:
format_scores.append(graded_total)
else:
log.info(
"Unable to grade a section with a total possible score of zero. " +
str(section_descriptor.location)
)
totaled_scores[section_format] = format_scores
    # Grading policy might be overridden by a CCX, need to reset it
course.set_grading_policy(course.grading_policy)
grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)
    # We round the grade here to make sure that the grade is a whole percentage
    # and doesn't get displayed differently than it is graded
grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100
letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])
grade_summary['grade'] = letter_grade
grade_summary['totaled_scores'] = totaled_scores # make this available, eg for instructor download & debugging
if keep_raw_scores:
# way to get all RAW scores out to instructor
# so grader can be double-checked
grade_summary['raw_scores'] = raw_scores
return grade_summary
def grade_for_percentage(grade_cutoffs, percentage):
"""
Returns a letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None.
Arguments
- grade_cutoffs is a dictionary mapping a grade to the lowest
possible percentage to earn that grade.
- percentage is the final percent across all problems in a course
"""
letter_grade = None
# Possible grades, sorted in descending order of score
descending_grades = sorted(grade_cutoffs, key=lambda x: grade_cutoffs[x], reverse=True)
for possible_grade in descending_grades:
if percentage >= grade_cutoffs[possible_grade]:
letter_grade = possible_grade
break
return letter_grade
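# Worked example (hypothetical cutoffs): with grade_cutoffs = {'A': 0.9, 'B': 0.8}
# and percentage = 0.85, the descending scan rejects 'A' (0.85 < 0.9), accepts
# 'B' (0.85 >= 0.8) and returns 'B'; 0.75 would fall through and return None.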
@transaction.commit_manually
def progress_summary(student, request, course):
"""
Wraps "_progress_summary" with the manual_transaction context manager just
in case there are unanticipated errors.
"""
with manual_transaction():
return _progress_summary(student, request, course)
# TODO: This method is not very good. It was written in the old course style and
# then converted over and performance is not good. Once the progress page is redesigned
# to not have the progress summary this method should be deleted (so it won't be copied).
def _progress_summary(student, request, course):
"""
Unwrapped version of "progress_summary".
This pulls a summary of all problems in the course.
Returns
- courseware_summary is a summary of all sections with problems in the course.
It is organized as an array of chapters, each containing an array of sections,
each containing an array of scores. This contains information for graded and
ungraded problems, and is good for displaying a course summary with due dates,
etc.
Arguments:
student: A User object for the student to grade
course: A Descriptor containing the course to grade
If the student does not have access to load the course module, this function
will return None.
"""
with manual_transaction():
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, student, course, depth=None
)
# TODO: We need the request to pass into here. If we could
# forego that, our arguments would be simpler
course_module = get_module_for_descriptor(
student, request, course, field_data_cache, course.id, course=course
)
if not course_module:
# This student must not have access to the course.
return None
course_module = getattr(course_module, '_x_module', course_module)
submissions_scores = sub_api.get_scores(course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id))
chapters = []
# Don't include chapters that aren't displayable (e.g. due to error)
for chapter_module in course_module.get_display_items():
# Skip if the chapter is hidden
if chapter_module.hide_from_toc:
continue
sections = []
for section_module in chapter_module.get_display_items():
# Skip if the section is hidden
with manual_transaction():
if section_module.hide_from_toc:
continue
graded = section_module.graded
scores = []
module_creator = section_module.xmodule_runtime.get_module
for module_descriptor in yield_dynamic_descriptor_descendants(
section_module, student.id, module_creator
):
course_id = course.id
(correct, total) = get_score(
course_id, student, module_descriptor, module_creator, scores_cache=submissions_scores
)
if correct is None and total is None:
continue
scores.append(
Score(
correct,
total,
graded,
module_descriptor.display_name_with_default,
module_descriptor.location
)
)
scores.reverse()
section_total, _ = graders.aggregate_scores(
scores, section_module.display_name_with_default)
module_format = section_module.format if section_module.format is not None else ''
sections.append({
'display_name': section_module.display_name_with_default,
'url_name': section_module.url_name,
'scores': scores,
'section_total': section_total,
'format': module_format,
'due': section_module.due,
'graded': graded,
})
chapters.append({
'course': course.display_name_with_default,
'display_name': chapter_module.display_name_with_default,
'url_name': chapter_module.url_name,
'sections': sections
})
return chapters
def get_score(course_id, user, problem_descriptor, module_creator, scores_cache=None):
"""
Return the score for a user on a problem, as a tuple (correct, total).
e.g. (5,7) if you got 5 out of 7 points.
If this problem doesn't have a score, or we couldn't load it, returns (None,
None).
user: a Student object
problem_descriptor: an XModuleDescriptor
module_creator: a function that takes a descriptor, and returns the corresponding XModule for this user.
Can return None if user doesn't have access, or if something else went wrong.
scores_cache: A dict of location names to (earned, possible) point tuples.
If an entry is found in this cache, it takes precedence.
"""
scores_cache = scores_cache or {}
if not user.is_authenticated():
return (None, None)
location_url = problem_descriptor.location.to_deprecated_string()
if location_url in scores_cache:
return scores_cache[location_url]
# some problems have state that is updated independently of interaction
# with the LMS, so they need to always be scored. (E.g. foldit.)
if problem_descriptor.always_recalculate_grades:
problem = module_creator(problem_descriptor)
if problem is None:
return (None, None)
score = problem.get_score()
if score is not None:
return (score['score'], score['total'])
else:
return (None, None)
if not problem_descriptor.has_score:
# These are not problems, and do not have a score
return (None, None)
try:
student_module = StudentModule.objects.get(
student=user,
course_id=course_id,
module_state_key=problem_descriptor.location
)
except StudentModule.DoesNotExist:
student_module = None
if student_module is not None and student_module.max_grade is not None:
correct = student_module.grade if student_module.grade is not None else 0
total = student_module.max_grade
else:
# If the problem was not in the cache, or hasn't been graded yet,
# we need to instantiate the problem.
# Otherwise, the max score (cached in student_module) won't be available
problem = module_creator(problem_descriptor)
if problem is None:
return (None, None)
correct = 0.0
total = problem.max_score()
# Problem may be an error module (if something in the problem builder failed)
# In which case total might be None
if total is None:
return (None, None)
# Now we re-weight the problem, if specified
weight = problem_descriptor.weight
if weight is not None:
if total == 0:
log.exception("Cannot reweight a problem with zero total points. Problem: " + str(student_module))
return (correct, total)
correct = correct * weight / total
total = weight
return (correct, total)
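# Reweighting arithmetic sketch (hypothetical values): a problem scored 5/7
# with weight=10 becomes (5 * 10 / 7, 10) ~= (7.14, 10): the weight replaces
# the raw maximum while the earned fraction is preserved.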
@contextmanager
def manual_transaction():
"""A context manager for managing manual transactions"""
try:
yield
except Exception:
transaction.rollback()
log.exception('Due to an error, this transaction has been rolled back')
raise
else:
transaction.commit()
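# Minimal usage sketch: a clean exit from the block commits, any exception
# rolls back and is re-raised.
#   with manual_transaction():
#       do_database_work()  # hypothetical helper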
def iterate_grades_for(course_or_id, students, keep_raw_scores=False):
"""Given a course_id and an iterable of students (User), yield a tuple of:
(student, gradeset, err_msg) for every student enrolled in the course.
If an error occurred, gradeset will be an empty dict and err_msg will be an
exception message. If there was no error, err_msg is an empty string.
The gradeset is a dictionary with the following fields:
- grade : A final letter grade.
- percent : The final percent for the class (rounded up).
- section_breakdown : A breakdown of each section that makes
up the grade. (For display)
- grade_breakdown : A breakdown of the major components that
make up the final grade. (For display)
- raw_scores: contains scores for every graded module
"""
if isinstance(course_or_id, (basestring, CourseKey)):
course = courses.get_course_by_id(course_or_id)
else:
course = course_or_id
# We make a fake request because grading code expects to be able to look at
# the request. We have to attach the correct user to the request before
# grading that student.
request = RequestFactory().get('/')
for student in students:
with dog_stats_api.timer('lms.grades.iterate_grades_for', tags=[u'action:{}'.format(course.id)]):
try:
request.user = student
# Grading calls problem rendering, which calls masquerading,
# which checks session vars -- thus the empty session dict below.
# It's not pretty, but untangling that is currently beyond the
# scope of this feature.
request.session = {}
gradeset = grade(student, request, course, keep_raw_scores)
yield student, gradeset, ""
except Exception as exc: # pylint: disable=broad-except
# Keep marching on even if this student couldn't be graded for
# some reason, but log it for future reference.
log.exception(
'Cannot grade student %s (%s) in course %s because of exception: %s',
student.username,
student.id,
course.id,
exc.message
)
yield student, {}, exc.message
|
agpl-3.0
|
Tatsh/youtube-dl
|
youtube_dl/extractor/webofstories.py
|
21
|
5550
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
orderedSet,
)
class WebOfStoriesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?webofstories\.com/play/(?:[^/]+/)?(?P<id>[0-9]+)'
_VIDEO_DOMAIN = 'http://eu-mobile.webofstories.com/'
_GREAT_LIFE_STREAMER = 'rtmp://eu-cdn1.webofstories.com/cfx/st/'
_USER_STREAMER = 'rtmp://eu-users.webofstories.com/cfx/st/'
_TESTS = [{
'url': 'http://www.webofstories.com/play/hans.bethe/71',
'md5': '373e4dd915f60cfe3116322642ddf364',
'info_dict': {
'id': '4536',
'ext': 'mp4',
'title': 'The temperature of the sun',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'Hans Bethe talks about calculating the temperature of the sun',
'duration': 238,
}
}, {
'url': 'http://www.webofstories.com/play/55908',
'md5': '2985a698e1fe3211022422c4b5ed962c',
'info_dict': {
'id': '55908',
'ext': 'mp4',
'title': 'The story of Gemmata obscuriglobus',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'Planctomycete talks about The story of Gemmata obscuriglobus',
'duration': 169,
},
'skip': 'notfound',
}, {
# malformed og:title meta
'url': 'http://www.webofstories.com/play/54215?o=MS',
'info_dict': {
'id': '54215',
'ext': 'mp4',
'title': '"A Leg to Stand On"',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'Oliver Sacks talks about the death and resurrection of a limb',
'duration': 97,
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# Sometimes og:title meta is malformed
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
r'(?s)<strong>Title:\s*</strong>(.+?)<', webpage, 'title')
description = self._html_search_meta('description', webpage)
thumbnail = self._og_search_thumbnail(webpage)
embed_params = [s.strip(" \r\n\t'") for s in self._search_regex(
r'(?s)\$\("#embedCode"\).html\(getEmbedCode\((.*?)\)',
webpage, 'embed params').split(',')]
(
_, speaker_id, story_id, story_duration,
speaker_type, great_life, _thumbnail, _has_subtitles,
story_filename, _story_order) = embed_params
is_great_life_series = great_life == 'true'
duration = int_or_none(story_duration)
# URL building, see: http://www.webofstories.com/scripts/player.js
ms_prefix = ''
if speaker_type.lower() == 'ms':
ms_prefix = 'mini_sites/'
if is_great_life_series:
mp4_url = '{0:}lives/{1:}/{2:}.mp4'.format(
self._VIDEO_DOMAIN, speaker_id, story_filename)
rtmp_ext = 'flv'
streamer = self._GREAT_LIFE_STREAMER
play_path = 'stories/{0:}/{1:}'.format(
speaker_id, story_filename)
else:
mp4_url = '{0:}{1:}{2:}/{3:}.mp4'.format(
self._VIDEO_DOMAIN, ms_prefix, speaker_id, story_filename)
rtmp_ext = 'mp4'
streamer = self._USER_STREAMER
play_path = 'mp4:{0:}{1:}/{2}.mp4'.format(
ms_prefix, speaker_id, story_filename)
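        # Illustrative URLs from the branches above (hypothetical ids): a 'ms'
        # speaker 123 with story file 'abc' yields
        #   http://eu-mobile.webofstories.com/mini_sites/123/abc.mp4
        # while a great-life story yields .../lives/123/abc.mp4 with an RTMP
        # play path of stories/123/abc.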
formats = [{
'format_id': 'mp4_sd',
'url': mp4_url,
}, {
'format_id': 'rtmp_sd',
'page_url': url,
'url': streamer,
'ext': rtmp_ext,
'play_path': play_path,
}]
self._sort_formats(formats)
return {
'id': story_id,
'title': title,
'formats': formats,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
}
class WebOfStoriesPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?webofstories\.com/playAll/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.webofstories.com/playAll/donald.knuth',
'info_dict': {
'id': 'donald.knuth',
'title': 'Donald Knuth (Scientist)',
},
'playlist_mincount': 97,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
entries = [
self.url_result(
'http://www.webofstories.com/play/%s' % video_id,
'WebOfStories', video_id=video_id)
for video_id in orderedSet(re.findall(r'\bid=["\']td_(\d+)', webpage))
]
title = self._search_regex(
r'<div id="speakerName">\s*<span>([^<]+)</span>',
webpage, 'speaker', default=None)
if title:
field = self._search_regex(
r'<span id="primaryField">([^<]+)</span>',
webpage, 'field', default=None)
if field:
title += ' (%s)' % field
if not title:
title = self._search_regex(
r'<title>Play\s+all\s+stories\s*-\s*([^<]+)\s*-\s*Web\s+of\s+Stories</title>',
webpage, 'title')
return self.playlist_result(entries, playlist_id, title)
|
unlicense
|
StegSchreck/RatS
|
tests/unit/trakt/test_trakt_ratings_inserter.py
|
1
|
10089
|
import os
from unittest import TestCase
from unittest.mock import patch
from RatS.trakt.trakt_ratings_inserter import TraktRatingsInserter
TESTDATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'assets'))
class TraktRatingsInserterTest(TestCase):
def setUp(self):
if not os.path.exists(os.path.join(TESTDATA_PATH, 'exports')):
os.makedirs(os.path.join(TESTDATA_PATH, 'exports'))
self.movie = dict()
self.movie['title'] = 'Fight Club'
self.movie['year'] = 1999
self.movie['imdb'] = dict()
self.movie['imdb']['id'] = 'tt0137523'
self.movie['imdb']['url'] = 'https://www.imdb.com/title/tt0137523'
self.movie['imdb']['my_rating'] = 9
self.movie['tmdb'] = dict()
self.movie['tmdb']['id'] = '550'
self.movie['tmdb']['url'] = 'https://www.themoviedb.org/movie/550'
with open(os.path.join(TESTDATA_PATH, 'trakt', 'search_result.html'), encoding='UTF-8') as search_results:
self.search_results = search_results.read()
with open(os.path.join(TESTDATA_PATH, 'trakt', 'search_result_tile.html'), encoding='UTF-8') as result_tile:
self.search_result_tile_list = [result_tile.read()]
with open(os.path.join(TESTDATA_PATH, 'trakt', 'movie_details_page.html'),
encoding='UTF-8') as movie_details_page:
self.movie_details_page = movie_details_page.read()
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_init(self, browser_mock, base_init_mock):
TraktRatingsInserter(None)
self.assertTrue(base_init_mock.called)
@patch('RatS.base.base_ratings_inserter.RatingsInserter._print_progress_bar')
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._is_requested_movie')
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._get_search_results')
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_insert(self, browser_mock, base_init_mock, site_mock, overview_page_mock, # pylint: disable=too-many-arguments
eq_check_mock, progress_print_mock):
overview_page_mock.return_value = self.search_result_tile_list
eq_check_mock.return_value = True
site_mock.browser = browser_mock
inserter = TraktRatingsInserter(None)
inserter.args = False
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
inserter.insert([self.movie], 'IMDB')
self.assertTrue(base_init_mock.called)
self.assertTrue(progress_print_mock.called)
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_external_link_compare_imdb_success(self, browser_mock, base_init_mock, site_mock):
site_mock.browser = browser_mock
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
result = inserter._compare_external_links(self.movie_details_page, self.movie, 'imdb.com', 'imdb') # pylint: disable=protected-access
self.assertTrue(result)
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_external_link_compare_imdb_fail(self, browser_mock, base_init_mock, site_mock):
site_mock.browser = browser_mock
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
        # Use a different movie whose IMDB id does not appear on the Fight
        # Club details page, so the comparison is expected to fail.
        movie2 = dict()
        movie2['title'] = 'The Matrix'
        movie2['year'] = 1999
        movie2['imdb'] = dict()
        movie2['imdb']['id'] = 'tt0133093'
        movie2['imdb']['url'] = 'https://www.imdb.com/title/tt0133093'
        movie2['imdb']['my_rating'] = 9
        result = inserter._compare_external_links(self.movie_details_page, movie2, 'imdb.com', 'imdb')  # pylint: disable=protected-access
        self.assertFalse(result)
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_external_link_compare_tmdb_success(self, browser_mock, base_init_mock, site_mock):
site_mock.browser = browser_mock
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
result = inserter._compare_external_links(self.movie_details_page, self.movie, 'themoviedb.org', 'tmdb') # pylint: disable=protected-access
self.assertTrue(result)
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_external_link_compare_tmdb_fail(self, browser_mock, base_init_mock, site_mock):
site_mock.browser = browser_mock
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
movie2 = dict()
movie2['title'] = 'Arrival'
movie2['year'] = 2006
movie2['tmdb'] = dict()
movie2['tmdb']['id'] = '329865'
movie2['tmdb']['url'] = 'https://www.themoviedb.org/movie/329865'
movie2['tmdb']['my_rating'] = 7
result = inserter._compare_external_links(self.movie_details_page, movie2, 'themoviedb.org', 'tmdb') # pylint: disable=protected-access
self.assertFalse(result)
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._compare_external_links')
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_find_movie_success_by_imdb(self, browser_mock, base_init_mock, site_mock, compare_mock):
site_mock.browser = browser_mock
browser_mock.page_source = self.search_results
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
compare_mock.return_value = True
result = inserter._find_movie(self.movie) # pylint: disable=protected-access
self.assertTrue(result)
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._compare_external_links')
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_find_movie_success_by_tmdb(self, browser_mock, base_init_mock, site_mock, compare_mock):
site_mock.browser = browser_mock
browser_mock.page_source = self.search_results
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
compare_mock.return_value = True
movie2 = dict()
movie2['title'] = 'Fight Club'
movie2['year'] = 1999
movie2['tmdb'] = dict()
movie2['tmdb']['id'] = '550'
movie2['tmdb']['url'] = 'https://www.themoviedb.org/movie/550'
movie2['tmdb']['my_rating'] = 9
result = inserter._find_movie(movie2) # pylint: disable=protected-access
self.assertTrue(result)
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._compare_external_links')
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_find_movie_success_by_year(self, browser_mock, base_init_mock, site_mock, compare_mock):
site_mock.browser = browser_mock
browser_mock.page_source = self.search_results
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
compare_mock.return_value = True
movie2 = dict()
movie2['title'] = 'Fight Club'
movie2['year'] = 1999
result = inserter._find_movie(movie2) # pylint: disable=protected-access
self.assertTrue(result)
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._is_requested_movie')
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._get_search_results')
@patch('RatS.trakt.trakt_ratings_inserter.TraktRatingsInserter._compare_external_links')
@patch('RatS.trakt.trakt_ratings_inserter.Trakt')
@patch('RatS.base.base_ratings_inserter.RatingsInserter.__init__')
@patch('RatS.utils.browser_handler.Firefox')
def test_find_movie_fail(self, browser_mock, base_init_mock, site_mock, compare_mock, tiles_mock, equality_mock): # pylint: disable=too-many-arguments
site_mock.browser = browser_mock
browser_mock.page_source = self.search_results
inserter = TraktRatingsInserter(None)
inserter.site = site_mock
inserter.site.site_name = 'Trakt'
inserter.failed_movies = []
compare_mock.return_value = False
tiles_mock.return_value = self.search_result_tile_list
equality_mock.return_value = False
movie2 = dict()
movie2['title'] = 'The Matrix'
movie2['year'] = 1995
movie2['imdb'] = dict()
movie2['imdb']['id'] = 'tt0137523'
movie2['imdb']['url'] = 'https://www.imdb.com/title/tt0137523'
movie2['imdb']['my_rating'] = 9
result = inserter._find_movie(movie2) # pylint: disable=protected-access
self.assertFalse(result)
|
agpl-3.0
|
hectorip/PolymerBoilerplate
|
bp_includes/external/requests/packages/urllib3/exceptions.py
|
374
|
3274
|
# urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
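    # Hedged pickling sketch: __reduce__ drops the pool (which may hold live
    # sockets) but keeps the URL, so a round trip preserves it:
    #   import pickle
    #   err = RequestError(None, '/index', 'boom')
    #   pickle.loads(pickle.dumps(err)).url  # -> '/index'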
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
## Leaf Exceptions
class MaxRetryError(RequestError):
"Raised when the maximum number of retries is exceeded."
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s" % url
if reason:
message += " (Caused by %s: %s)" % (type(reason), reason)
else:
message += " (Caused by redirect)"
RequestError.__init__(self, pool, url, message)
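    # Illustrative message (hypothetical values): for url='/index' and a
    # timeout as reason, roughly
    #   "Max retries exceeded with url: /index (Caused by <type>: timed out)"
    # and "Max retries exceeded with url: /index (Caused by redirect)" when
    # reason is None.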
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationParseError(ValueError, HTTPError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
|
gpl-2.0
|
quentinlautischer/291MiniProject2
|
lib/python3.5/site-packages/bsddb3/tests/test_replication.py
|
1
|
21526
|
"""
Copyright (c) 2008-2015, Jesus Cea Avion <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Jesus Cea Avion nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""TestCases for distributed transactions.
"""
import os
import time
import unittest
from .test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplication(unittest.TestCase) :
def setUp(self) :
self.homeDirMaster = get_new_environment_path()
self.homeDirClient = get_new_environment_path()
self.dbenvMaster = db.DBEnv()
self.dbenvClient = db.DBEnv()
# Must use "DB_THREAD" because the Replication Manager will
# be executed in other threads but will use the same environment.
# http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
| db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
self.confirmed_master=self.client_startupdone=False
def confirmed_master(a,b,c) :
if b==db.DB_EVENT_REP_MASTER :
self.confirmed_master=True
def client_startupdone(a,b,c) :
if b==db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone=True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
self.dbMaster = self.dbClient = None
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
# Here we assign dummy event handlers to allow GC of the test object.
# Since the dummy handler doesn't use any outer scope variable, it
# doesn't keep any reference to the test object.
def dummy(*args) :
pass
self.dbenvMaster.set_event_notify(dummy)
self.dbenvClient.set_event_notify(dummy)
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
class DBReplicationManager(DBReplication) :
def test01_basic_replication(self) :
master_port = test_support.find_unused_port()
client_port = test_support.find_unused_port()
if db.version() >= (5, 2) :
self.site = self.dbenvMaster.repmgr_site("127.0.0.1", master_port)
self.site.set_config(db.DB_GROUP_CREATOR, True)
self.site.set_config(db.DB_LOCAL_SITE, True)
self.site2 = self.dbenvMaster.repmgr_site("127.0.0.1", client_port)
self.site3 = self.dbenvClient.repmgr_site("127.0.0.1", master_port)
self.site3.set_config(db.DB_BOOTSTRAP_HELPER, True)
self.site4 = self.dbenvClient.repmgr_site("127.0.0.1", client_port)
self.site4.set_config(db.DB_LOCAL_SITE, True)
d = {
db.DB_BOOTSTRAP_HELPER: [False, False, True, False],
db.DB_GROUP_CREATOR: [True, False, False, False],
db.DB_LEGACY: [False, False, False, False],
db.DB_LOCAL_SITE: [True, False, False, True],
db.DB_REPMGR_PEER: [False, False, False, False ],
}
for i, j in list(d.items()) :
for k, v in \
zip([self.site, self.site2, self.site3, self.site4], j) :
if v :
self.assertTrue(k.get_config(i))
else :
self.assertFalse(k.get_config(i))
self.assertNotEqual(self.site.get_eid(), self.site2.get_eid())
self.assertNotEqual(self.site3.get_eid(), self.site4.get_eid())
for i, j in zip([self.site, self.site2, self.site3, self.site4], \
[master_port, client_port, master_port, client_port]) :
addr = i.get_address()
self.assertEqual(addr, ("127.0.0.1", j))
for i in [self.site, self.site2] :
self.assertEqual(i.get_address(),
self.dbenvMaster.repmgr_site_by_eid(i.get_eid()).get_address())
for i in [self.site3, self.site4] :
self.assertEqual(i.get_address(),
self.dbenvClient.repmgr_site_by_eid(i.get_eid()).get_address())
else :
self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
self.dbenvMaster.rep_set_nsites(2)
self.dbenvClient.rep_set_nsites(2)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_priority(0)
self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100123)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_CONNECTION_RETRY), 100321)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100234)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_TIMEOUT), 100432)
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
self.assertEqual(self.dbenvMaster.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100345)
self.assertEqual(self.dbenvClient.rep_get_timeout(
db.DB_REP_ELECTION_RETRY), 100543)
self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
        self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER)
        self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT)
self.assertEqual(self.dbenvMaster.rep_get_nsites(),2)
self.assertEqual(self.dbenvClient.rep_get_nsites(),2)
self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
self.assertEqual(self.dbenvClient.rep_get_priority(),0)
self.assertEqual(self.dbenvMaster.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
self.assertEqual(self.dbenvClient.repmgr_get_ack_policy(),
db.DB_REPMGR_ACKS_ALL)
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
d = self.dbenvMaster.repmgr_site_list()
self.assertEqual(len(d), 1)
d = list(d.values())[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], client_port)
self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
(d[2]==db.DB_REPMGR_DISCONNECTED))
d = self.dbenvClient.repmgr_site_list()
self.assertEqual(len(d), 1)
d = list(d.values())[0] # There is only one
self.assertEqual(d[0], "127.0.0.1")
self.assertEqual(d[1], master_port)
self.assertTrue((d[2]==db.DB_REPMGR_CONNECTED) or \
(d[2]==db.DB_REPMGR_DISCONNECTED))
        d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR)
self.assertTrue("msgs_queued" in d)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0o666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v is None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v is not None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual(None, v)
class DBBaseReplication(DBReplication) :
def setUp(self) :
DBReplication.setUp(self)
def confirmed_master(a,b,c) :
if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
self.confirmed_master = True
def client_startupdone(a,b,c) :
if b == db.DB_EVENT_REP_STARTUPDONE :
self.client_startupdone = True
self.dbenvMaster.set_event_notify(confirmed_master)
self.dbenvClient.set_event_notify(client_startupdone)
import queue
self.m2c = queue.Queue()
self.c2m = queue.Queue()
        # There are only two nodes, so we don't need to make
        # any routing decisions
def m2c(dbenv, control, rec, lsnp, envid, flags) :
self.m2c.put((control, rec))
def c2m(dbenv, control, rec, lsnp, envid, flags) :
self.c2m.put((control, rec))
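        # Transport sketch: Berkeley DB invokes each callback as
        # (dbenv, control, rec, lsnp, envid, flags); we only forward
        # (control, rec), and the peer replays it via
        # rep_process_message(control, rec, envid) in thread_do below.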
self.dbenvMaster.rep_set_transport(13,m2c)
self.dbenvMaster.rep_set_priority(10)
self.dbenvClient.rep_set_transport(3,c2m)
self.dbenvClient.rep_set_priority(0)
self.assertEqual(self.dbenvMaster.rep_get_priority(),10)
self.assertEqual(self.dbenvClient.rep_get_priority(),0)
#self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
#self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
#self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
def thread_master() :
return self.thread_do(self.dbenvMaster, self.c2m, 3,
self.master_doing_election, True)
def thread_client() :
return self.thread_do(self.dbenvClient, self.m2c, 13,
self.client_doing_election, False)
from threading import Thread
t_m=Thread(target=thread_master)
t_c=Thread(target=thread_client)
import sys
if sys.version_info[0] < 3 :
t_m.setDaemon(True)
t_c.setDaemon(True)
else :
t_m.daemon = True
t_c.daemon = True
self.t_m = t_m
self.t_c = t_c
self.dbMaster = self.dbClient = None
self.master_doing_election=[False]
self.client_doing_election=[False]
def tearDown(self):
if self.dbClient :
self.dbClient.close()
if self.dbMaster :
self.dbMaster.close()
self.m2c.put(None)
self.c2m.put(None)
self.t_m.join()
self.t_c.join()
# Here we assign dummy event handlers to allow GC of the test object.
# Since the dummy handler doesn't use any outer scope variable, it
# doesn't keep any reference to the test object.
def dummy(*args) :
pass
self.dbenvMaster.set_event_notify(dummy)
self.dbenvClient.set_event_notify(dummy)
self.dbenvMaster.rep_set_transport(13,dummy)
self.dbenvClient.rep_set_transport(3,dummy)
self.dbenvClient.close()
self.dbenvMaster.close()
test_support.rmtree(self.homeDirClient)
test_support.rmtree(self.homeDirMaster)
def basic_rep_threading(self) :
self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v is None : return
env.rep_process_message(v[0], v[1], envid)
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
def test01_basic_replication(self) :
self.basic_rep_threading()
# The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
# is not generated if the master has no new transactions.
# This is solved in BDB 4.6 (#15542).
import time
timeout = time.time()+10
while (time.time()<timeout) and not (self.confirmed_master and
self.client_startupdone) :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.dbMaster=db.DB(self.dbenvMaster)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
txn.commit()
import time,os.path
timeout=time.time()+10
while (time.time()<timeout) and \
not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
time.sleep(0.01)
self.dbClient=db.DB(self.dbenvClient)
while True :
txn=self.dbenvClient.txn_begin()
try :
self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
mode=0o666, txn=txn)
except db.DBRepHandleDeadError :
txn.abort()
self.dbClient.close()
self.dbClient=db.DB(self.dbenvClient)
continue
txn.commit()
break
        d = self.dbenvMaster.rep_stat(flags=db.DB_STAT_CLEAR)
self.assertTrue("master_changes" in d)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.put("ABC", "123", txn=txn)
txn.commit()
import time
timeout=time.time()+10
v=None
while (time.time()<timeout) and (v is None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual("123", v)
txn=self.dbenvMaster.txn_begin()
self.dbMaster.delete("ABC", txn=txn)
txn.commit()
timeout=time.time()+10
while (time.time()<timeout) and (v is not None) :
txn=self.dbenvClient.txn_begin()
v=self.dbClient.get("ABC", txn=txn)
txn.commit()
if v is None :
time.sleep(0.02)
self.assertTrue(time.time()<timeout)
self.assertEqual(None, v)
def test02_test_request(self) :
self.basic_rep_threading()
(minimum, maximum) = self.dbenvClient.rep_get_request()
self.dbenvClient.rep_set_request(minimum-1, maximum+1)
self.assertEqual(self.dbenvClient.rep_get_request(),
(minimum-1, maximum+1))
def test03_master_election(self) :
# Get ready to hold an election
#self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
def thread_do(env, q, envid, election_status, must_be_master) :
while True :
v=q.get()
if v is None : return
r = env.rep_process_message(v[0],v[1],envid)
if must_be_master and self.confirmed_master :
self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
must_be_master = False
if r[0] == db.DB_REP_HOLDELECTION :
def elect() :
while True :
try :
env.rep_elect(2, 1)
election_status[0] = False
break
except db.DBRepUnavailError :
pass
if not election_status[0] and not self.confirmed_master :
from threading import Thread
election_status[0] = True
t=Thread(target=elect)
import sys
if sys.version_info[0] < 3 :
t.setDaemon(True)
else :
t.daemon = True
t.start()
self.thread_do = thread_do
self.t_m.start()
self.t_c.start()
self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
self.client_doing_election[0] = True
while True :
try :
self.dbenvClient.rep_elect(2, 1)
self.client_doing_election[0] = False
break
except db.DBRepUnavailError :
pass
self.assertTrue(self.confirmed_master)
# Race condition showed up after upgrading to Solaris 10 Update 10
# https://forums.oracle.com/forums/thread.jspa?messageID=9902860
# [email protected]: See private email from Paula Bingham (Oracle),
# in 20110929.
while not (self.dbenvClient.rep_stat()["startup_complete"]) :
pass
def test04_test_clockskew(self) :
fast, slow = 1234, 1230
self.dbenvMaster.rep_set_clockskew(fast, slow)
self.assertEqual((fast, slow),
self.dbenvMaster.rep_get_clockskew())
self.basic_rep_threading()
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
dbenv = db.DBEnv()
try :
dbenv.repmgr_get_ack_policy()
ReplicationManager_available=True
except :
ReplicationManager_available=False
dbenv.close()
del dbenv
if ReplicationManager_available :
suite.addTest(unittest.makeSuite(DBReplicationManager))
if have_threads :
suite.addTest(unittest.makeSuite(DBBaseReplication))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
apache-2.0
|
calebfoss/tensorflow
|
tensorflow/contrib/training/python/training/device_setter.py
|
38
|
3951
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategies for placing variables on parameter servers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import tensor_shape
class GreedyLoadBalancingStrategy(object):
"""Returns the least-loaded ps task for op placement.
The load is calculated by a user-specified load function passed in at
construction. There are no units for load, and the load function is
responsible for providing an internally consistent measure.
Note that this strategy is very sensitive to the exact order in which
ps ops (typically variables) are created, as it greedily places ops
on the least-loaded ps at the point each op is processed.
One reasonable heuristic is the `variable_size_load_fn`, which
estimates load as the number of bytes that would be used to store and
transmit the entire variable. More advanced load functions
could consider the difference in access patterns across ops, or trade
off CPU-intensive ops with RAM-intensive ops with network bandwidth.
This class is intended to be used as a `ps_strategy` in
`tf.train.replica_device_setter`.
"""
  def __init__(self, num_tasks, load_fn):
    """Create a new `GreedyLoadBalancingStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
load_fn: A callable that takes an `Operation` and returns a
numeric load value for that op.
"""
self._num_tasks = num_tasks
self._load_fn = load_fn
self._ps_loads = np.zeros(num_tasks)
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
      op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Greedily
places the op on the least-loaded ps task so far, as determined
by the load function.
"""
task = np.argmin(self._ps_loads)
self._ps_loads[task] += self._load_fn(op)
return task
def byte_size_load_fn(op):
"""Load function that computes the byte size of a single-output `Operation`.
This is intended to be used with `"Variable"` ops, which have a single
`Tensor` output with the contents of the variable. However, it can also be
used for calculating the size of any op that has a single output.
Intended to be used with `GreedyLoadBalancingStrategy`.
Args:
op: An `Operation` with a single output, typically a "Variable" op.
Returns:
The number of bytes in the output `Tensor`.
Raises:
ValueError: if `op` does not have a single output, or if the shape of the
single output is not fully-defined.
"""
if len(op.outputs) != 1:
raise ValueError("Op %s must have a single output" % op)
output = op.outputs[0]
elem_size = output.dtype.size
shape = output.get_shape()
if not shape.is_fully_defined():
# Due to legacy behavior, scalar "Variable" ops have output Tensors that
# have unknown shape when the op is created (and hence passed to this
# load function for placement), even though the scalar shape is set
# explicitly immediately afterward.
shape = tensor_shape.TensorShape(op.get_attr("shape"))
shape.assert_is_fully_defined()
return shape.num_elements() * elem_size
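# A minimal usage sketch, assuming the TF 1.x-era `tf.train.replica_device_setter`
# API with its `ps_strategy` argument; the variable shapes are made up for
# illustration and this block is not part of the module proper.
if __name__ == "__main__":
  import tensorflow as tf
  greedy = GreedyLoadBalancingStrategy(num_tasks=2,
                                       load_fn=byte_size_load_fn)
  with tf.device(tf.train.replica_device_setter(ps_tasks=2,
                                                ps_strategy=greedy)):
    # Each variable lands on whichever ps task has the smallest accumulated
    # byte load at the moment it is created.
    weights = tf.Variable(tf.zeros([1024, 1024]))
    biases = tf.Variable(tf.zeros([1024]))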
|
apache-2.0
|
jamesthechamp/zamboni
|
mkt/search/filters.py
|
5
|
12097
|
from django.conf import settings
from django.utils import translation
from elasticsearch_dsl import F, query
from elasticsearch_dsl.filter import Bool
from rest_framework.filters import BaseFilterBackend
import mkt
from mkt.api.base import form_errors, get_region_from_request
from mkt.constants.applications import get_device_id
from mkt.features.utils import get_feature_profile
class SearchQueryFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that scores the given ES queryset
with a should query based on the search query found in the current
request's query parameters.
"""
def _get_locale_analyzer(self, lang):
analyzer = mkt.SEARCH_LANGUAGE_TO_ANALYZER.get(lang)
if (analyzer in mkt.SEARCH_ANALYZER_PLUGINS and
not settings.ES_USE_PLUGINS):
analyzer = None
return analyzer
def filter_queryset(self, request, queryset, view):
q = request.GET.get('q', '').lower()
lang = translation.get_language()
analyzer = self._get_locale_analyzer(lang)
if not q:
return queryset
should = []
rules = [
(query.Match, {'query': q, 'boost': 3, 'analyzer': 'standard'}),
(query.Match, {'query': q, 'boost': 4, 'type': 'phrase',
'slop': 1}),
(query.Prefix, {'value': q, 'boost': 1.5}),
]
# Only add fuzzy queries if q is a single word. It doesn't make sense
# to do a fuzzy query for multi-word queries.
if ' ' not in q:
rules.append(
(query.Fuzzy, {'value': q, 'boost': 2, 'prefix_length': 1}))
        # Apply rules to search on a few base fields. Some might not be
        # present in every document type / index.
for k, v in rules:
for field in ('name', 'short_name', 'title', 'app_slug', 'author',
'url_tokenized'):
should.append(k(**{field: v}))
# Exact matches need to be queried against a non-analyzed field. Let's
# do a term query on `name.raw` for an exact match against the item
# name and give it a good boost since this is likely what the user
# wants.
should.append(query.Term(**{'name.raw': {'value': q, 'boost': 10}}))
# Do the same for GUID searches.
should.append(query.Term(**{'guid': {'value': q, 'boost': 10}}))
# If query is numeric, check if it is an ID.
if q.isnumeric():
should.append(query.Term(**{'id': {'value': q, 'boost': 10}}))
if analyzer:
should.append(
query.Match(**{'name_l10n_%s' % analyzer: {'query': q,
'boost': 2.5}}))
should.append(
query.Match(**{'short_name_l10n_%s' % analyzer: {
'query': q,
'boost': 2.5}}))
# Add searches on the description field.
should.append(
query.Match(description={'query': q, 'boost': 0.8,
'type': 'phrase'}))
if analyzer:
desc_field = 'description_l10n_%s' % analyzer
desc_analyzer = ('%s_analyzer' % analyzer
if analyzer in mkt.STEMMER_MAP else analyzer)
should.append(
query.Match(
**{desc_field: {'query': q, 'boost': 0.6, 'type': 'phrase',
'analyzer': desc_analyzer}}))
# Add searches on tag field.
should.append(query.Term(tags={'value': q}))
if ' ' not in q:
should.append(query.Fuzzy(tags={'value': q, 'prefix_length': 1}))
# The list of functions applied to our `function_score` query.
functions = [
query.SF('field_value_factor', field='boost'),
]
# Add a boost for the preferred region, if it exists.
region = get_region_from_request(request)
if region:
functions.append({
'filter': {'term': {'preferred_regions': region.id}},
# TODO: When we upgrade to Elasticsearch 1.4, change this
# to 'weight'.
'boost_factor': 4,
})
return queryset.query('function_score',
query=query.Bool(should=should),
functions=functions)
class SearchFormFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters the given queryset
based on `self.form_class`.
"""
# A mapping of form fields to Elasticsearch fields for those that differ.
FORM_TO_FIELD_MAP = {
'author': 'author.raw',
'cat': 'category',
'has_info_request': 'latest_version.has_info_request',
'has_editor_comment': 'latest_version.has_editor_comment',
'languages': 'supported_locales',
'offline': 'is_offline',
'premium_types': 'premium_type',
'tag': 'tags'
}
def filter_queryset(self, request, queryset, view):
form = view.form_class(request.GET)
if not form.is_valid():
raise form_errors(form)
self.form_data = form.cleaned_data
data = {}
for k, v in self.form_data.items():
data[self.FORM_TO_FIELD_MAP.get(k, k)] = v
# Must filters.
must = []
for field in self.VALID_FILTERS:
value = data.get(field)
if value is not None:
if type(value) == list:
filter_type = 'terms'
else:
filter_type = 'term'
must.append(F(filter_type, **{field: value}))
if must:
return queryset.filter(Bool(must=must))
return queryset
class PublicSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['app_type', 'author.raw', 'category', 'device', 'guid',
'installs_allowed_from', 'is_offline', 'manifest_url',
'premium_type', 'supported_locales', 'tags']
class ReviewerSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['app_type', 'author.raw', 'category', 'device',
'latest_version.has_editor_comment',
'latest_version.has_info_request',
'latest_version.status',
'installs_allowed_from', 'is_escalated', 'is_offline',
'manifest_url', 'premium_type', 'status',
'supported_locales', 'tags']
def filter_queryset(self, request, queryset, view):
queryset = super(ReviewerSearchFormFilter,
self).filter_queryset(request, queryset, view)
# Special case for `is_tarako`, which gets converted to a tag filter.
is_tarako = self.form_data.get('is_tarako')
if is_tarako is not None:
if is_tarako:
queryset = queryset.filter(
Bool(must=[F('term', tags='tarako')]))
else:
queryset = queryset.filter(
Bool(must=[~F('term', tags='tarako')]))
return queryset
class WebsiteSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['keywords', 'category', 'device']
class ReviewerWebsiteSearchFormFilter(SearchFormFilter):
VALID_FILTERS = ['keywords', 'category', 'device', 'status', 'is_disabled']
class PublicAppsFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters only public items --
those with PUBLIC status and not disabled.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(
Bool(must=[F('term', status=mkt.STATUS_PUBLIC),
F('term', is_disabled=False)]))
class ValidAppsFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters only valid items --
those with any valid status and not disabled or deleted.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(
Bool(must=[F('terms', status=mkt.VALID_STATUSES),
F('term', is_disabled=False)]))
class DeviceTypeFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters based on the matching
device type provided.
"""
def filter_queryset(self, request, queryset, view):
device_id = get_device_id(request)
data = {
'gaia': getattr(request, 'GAIA', False),
'mobile': getattr(request, 'MOBILE', False),
'tablet': getattr(request, 'TABLET', False),
}
flash_incompatible = data['mobile'] or data['gaia']
if device_id:
queryset = queryset.filter(
Bool(must=[F('term', device=device_id)]))
if flash_incompatible:
queryset = queryset.filter(
Bool(must_not=[F('term', uses_flash=True)]))
return queryset
class RegionFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters based on the matching
region provided.
"""
def filter_queryset(self, request, queryset, view):
region = get_region_from_request(request)
if region:
return queryset.filter(
Bool(must_not=[F('term', region_exclusions=region.id)]))
return queryset
class ProfileFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that filters based on the feature
profile provided.
"""
def filter_queryset(self, request, queryset, view):
profile = get_feature_profile(request)
if profile:
must_not = []
for k in profile.to_kwargs(prefix='features.has_').keys():
must_not.append(F('term', **{k: True}))
if must_not:
return queryset.filter(Bool(must_not=must_not))
return queryset
class SortingFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that applies sorting based on the
form data provided.
"""
DEFAULT_SORTING = {
'popularity': '-popularity',
'rating': '-bayesian_rating',
'created': '-created',
'reviewed': '-reviewed',
'name': 'name_sort',
'trending': '-trending',
}
def _get_regional_sort(self, region, field):
"""
A helper method to return the sort field with region for mature
regions, otherwise returns the field.
"""
if region and not region.adolescent:
return ['-%s_%s' % (field, region.id)]
return ['-%s' % field]
def filter_queryset(self, request, queryset, view):
region = get_region_from_request(request)
search_query = request.GET.get('q')
sort = request.GET.getlist('sort')
# When querying (with `?q=`) we want to sort by relevance. If no query
# is provided and no `?sort` is provided, i.e. we are only applying
# filters which don't affect the relevance, we sort by popularity
# descending.
order_by = None
if not search_query:
order_by = self._get_regional_sort(region, 'popularity')
if sort:
if 'popularity' in sort:
order_by = self._get_regional_sort(region, 'popularity')
elif 'trending' in sort:
order_by = self._get_regional_sort(region, 'trending')
else:
order_by = [self.DEFAULT_SORTING[name] for name in sort
if name in self.DEFAULT_SORTING]
if order_by:
return queryset.sort(*order_by)
return queryset
class OpenMobileACLFilter(BaseFilterBackend):
"""
A django-rest-framework filter backend that finds apps using openmobile_acl
feature flag.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(
Bool(must=[F('term', **{'features.has_openmobileacl': True})]))
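# Hedged wiring sketch (not part of this module): these backends are designed
# to be chained on a django-rest-framework view, each narrowing or scoring the
# Elasticsearch queryset in turn. The view name below is hypothetical.
#
# class AppSearchView(ListAPIView):
#     filter_backends = [SearchQueryFilter, PublicSearchFormFilter,
#                        PublicAppsFilter, DeviceTypeFilter, RegionFilter,
#                        ProfileFilter, SortingFilter]
#
# DRF calls each backend's filter_queryset() in order, so SortingFilter runs
# last, after all filters have been applied.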
|
bsd-3-clause
|
eerorika/ansible
|
test/units/module_utils/basic/test_exit_json.py
|
22
|
7348
|
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import copy
import json
import sys
from ansible.compat.tests import unittest
from units.mock.procenv import swap_stdin_and_argv, swap_stdout
from ansible.module_utils import basic
empty_invocation = {u'module_args': {}}
class TestAnsibleModuleExitJson(unittest.TestCase):
def setUp(self):
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
self.stdin_swap_ctx = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap_ctx.__enter__()
# since we can't use context managers and "with" without overriding run(), call them directly
self.stdout_swap_ctx = swap_stdout()
self.fake_stream = self.stdout_swap_ctx.__enter__()
basic._ANSIBLE_ARGS = None
self.module = basic.AnsibleModule(argument_spec=dict())
def tearDown(self):
# since we can't use context managers and "with" without overriding run(), call them directly to clean up
self.stdin_swap_ctx.__exit__(None, None, None)
self.stdout_swap_ctx.__exit__(None, None, None)
def test_exit_json_no_args_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json()
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 0)
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation))
def test_exit_json_args_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json(msg='message')
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 0)
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation))
def test_fail_json_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.fail_json(msg='message')
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 1)
else:
self.assertEquals(ctx.exception.code, 1)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(msg="message", failed=True, invocation=empty_invocation))
def test_exit_json_proper_changed(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json(changed=True, msg='success')
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation))
class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
dataset = (
(dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:[email protected]/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:[email protected]/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(dict(username='person', password='password12345'),
dict(one=1, pwd='$ecret k3y', url='https://username:[email protected]/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:$ecret [email protected]/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
)
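    # Each dataset entry is a triple: (module_args, kwargs passed to
    # exit_json/fail_json, expected JSON output). Values of no_log parameters
    # must be replaced by OMIT both as returned values and inside embedded
    # URL credentials.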
def test_exit_json_removes_values(self):
self.maxDiff = None
for args, return_val, expected in self.dataset:
params = dict(ANSIBLE_MODULE_ARGS=args)
params = json.dumps(params)
with swap_stdin_and_argv(stdin_data=params):
with swap_stdout():
basic._ANSIBLE_ARGS = None
module = basic.AnsibleModule(
argument_spec = dict(
username=dict(),
password=dict(no_log=True),
token=dict(no_log=True),
),
)
with self.assertRaises(SystemExit) as ctx:
self.assertEquals(module.exit_json(**return_val), expected)
self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
def test_fail_json_removes_values(self):
self.maxDiff = None
for args, return_val, expected in self.dataset:
expected = copy.deepcopy(expected)
del expected['changed']
expected['failed'] = True
params = dict(ANSIBLE_MODULE_ARGS=args)
params = json.dumps(params)
with swap_stdin_and_argv(stdin_data=params):
with swap_stdout():
basic._ANSIBLE_ARGS = None
module = basic.AnsibleModule(
argument_spec = dict(
username=dict(),
password=dict(no_log=True),
token=dict(no_log=True),
),
)
with self.assertRaises(SystemExit) as ctx:
self.assertEquals(module.fail_json(**return_val), expected)
self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
|
gpl-3.0
|
nhenezi/kuma
|
vendor/packages/sqlparse/tests/test_format.py
|
6
|
10794
|
# -*- coding: utf-8 -*-
from tests.utils import TestCaseBase
import sqlparse
class TestFormat(TestCaseBase):
def test_keywordcase(self):
sql = 'select * from bar; -- select foo\n'
res = sqlparse.format(sql, keyword_case='upper')
self.ndiffAssertEqual(res, 'SELECT * FROM bar; -- select foo\n')
res = sqlparse.format(sql, keyword_case='capitalize')
self.ndiffAssertEqual(res, 'Select * From bar; -- select foo\n')
res = sqlparse.format(sql.upper(), keyword_case='lower')
self.ndiffAssertEqual(res, 'select * from BAR; -- SELECT FOO\n')
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, sql,
keyword_case='foo')
def test_identifiercase(self):
sql = 'select * from bar; -- select foo\n'
res = sqlparse.format(sql, identifier_case='upper')
self.ndiffAssertEqual(res, 'select * from BAR; -- select foo\n')
res = sqlparse.format(sql, identifier_case='capitalize')
self.ndiffAssertEqual(res, 'select * from Bar; -- select foo\n')
res = sqlparse.format(sql.upper(), identifier_case='lower')
self.ndiffAssertEqual(res, 'SELECT * FROM bar; -- SELECT FOO\n')
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, sql,
identifier_case='foo')
sql = 'select * from "foo"."bar"'
res = sqlparse.format(sql, identifier_case="upper")
self.ndiffAssertEqual(res, 'select * from "FOO"."BAR"')
def test_strip_comments_single(self):
sql = 'select *-- statement starts here\nfrom foo'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select * from foo')
sql = 'select * -- statement starts here\nfrom foo'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select * from foo')
sql = 'select-- foo\nfrom -- bar\nwhere'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select from where')
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, sql,
strip_comments=None)
def test_strip_comments_multi(self):
sql = '/* sql starts here */\nselect'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select')
sql = '/* sql starts here */ select'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select')
sql = '/*\n * sql starts here\n */\nselect'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select')
sql = 'select (/* sql starts here */ select 2)'
res = sqlparse.format(sql, strip_comments=True)
self.ndiffAssertEqual(res, 'select (select 2)')
def test_strip_ws(self):
f = lambda sql: sqlparse.format(sql, strip_whitespace=True)
s = 'select\n* from foo\n\twhere ( 1 = 2 )\n'
self.ndiffAssertEqual(f(s), 'select * from foo where (1 = 2)')
s = 'select -- foo\nfrom bar\n'
self.ndiffAssertEqual(f(s), 'select -- foo\nfrom bar')
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, s,
strip_whitespace=None)
def test_outputformat(self):
sql = 'select * from foo;'
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, sql,
output_format='foo')
class TestFormatReindent(TestCaseBase):
def test_option(self):
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, 'foo',
reindent=2)
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, 'foo',
indent_tabs=2)
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, 'foo',
reindent=True, indent_width='foo')
self.assertRaises(sqlparse.SQLParseError, sqlparse.format, 'foo',
reindent=True, indent_width=-12)
def test_stmts(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo; select bar'
self.ndiffAssertEqual(f(s), 'select foo;\n\nselect bar')
s = 'select foo'
self.ndiffAssertEqual(f(s), 'select foo')
s = 'select foo; -- test\n select bar'
self.ndiffAssertEqual(f(s), 'select foo; -- test\n\nselect bar')
def test_keywords(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo union select * from bar;'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'union',
'select *',
'from bar;']))
def test_parenthesis(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select count(*) from (select * from foo);'
self.ndiffAssertEqual(f(s),
'\n'.join(['select count(*)',
'from',
' (select *',
' from foo);',
])
)
def test_where(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo where bar = 1 and baz = 2 or bzz = 3;'
self.ndiffAssertEqual(f(s), ('select *\nfrom foo\n'
'where bar = 1\n'
' and baz = 2\n'
' or bzz = 3;'))
s = 'select * from foo where bar = 1 and (baz = 2 or bzz = 3);'
self.ndiffAssertEqual(f(s), ('select *\nfrom foo\n'
'where bar = 1\n'
' and (baz = 2\n'
' or bzz = 3);'))
def test_join(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select * from foo join bar on 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'join bar on 1 = 2']))
s = 'select * from foo inner join bar on 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'inner join bar on 1 = 2']))
s = 'select * from foo left outer join bar on 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select *',
'from foo',
'left outer join bar on 1 = 2']
))
def test_identifier_list(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select foo, bar, baz from table1, table2 where 1 = 2'
self.ndiffAssertEqual(f(s), '\n'.join(['select foo,',
' bar,',
' baz',
'from table1,',
' table2',
'where 1 = 2']))
s = 'select a.*, b.id from a, b'
self.ndiffAssertEqual(f(s), '\n'.join(['select a.*,',
' b.id',
'from a,',
' b']))
def test_case(self):
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'case when foo = 1 then 2 when foo = 3 then 4 else 5 end'
self.ndiffAssertEqual(f(s), '\n'.join(['case when foo = 1 then 2',
' when foo = 3 then 4',
' else 5',
'end']))
def test_nested_identifier_list(self): # issue4
f = lambda sql: sqlparse.format(sql, reindent=True)
s = '(foo as bar, bar1, bar2 as bar3, b4 as b5)'
self.ndiffAssertEqual(f(s), '\n'.join(['(foo as bar,',
' bar1,',
' bar2 as bar3,',
' b4 as b5)']))
def test_duplicate_linebreaks(self): # issue3
f = lambda sql: sqlparse.format(sql, reindent=True)
s = 'select c1 -- column1\nfrom foo'
self.ndiffAssertEqual(f(s), '\n'.join(['select c1 -- column1',
'from foo']))
s = 'select c1 -- column1\nfrom foo'
r = sqlparse.format(s, reindent=True, strip_comments=True)
self.ndiffAssertEqual(r, '\n'.join(['select c1',
'from foo']))
s = 'select c1\nfrom foo\norder by c1'
self.ndiffAssertEqual(f(s), '\n'.join(['select c1',
'from foo',
'order by c1']))
s = 'select c1 from t1 where (c1 = 1) order by c1'
self.ndiffAssertEqual(f(s), '\n'.join(['select c1',
'from t1',
'where (c1 = 1)',
'order by c1']))
class TestOutputFormat(TestCaseBase):
def test_python(self):
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='python')
self.ndiffAssertEqual(f(sql), "sql = 'select * from foo;'")
f = lambda sql: sqlparse.format(sql, output_format='python',
reindent=True)
self.ndiffAssertEqual(f(sql), ("sql = ('select * '\n"
" 'from foo;')"))
def test_php(self):
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='php')
self.ndiffAssertEqual(f(sql), '$sql = "select * from foo;";')
f = lambda sql: sqlparse.format(sql, output_format='php',
reindent=True)
self.ndiffAssertEqual(f(sql), ('$sql = "select * ";\n'
'$sql .= "from foo;";'))
def test_sql(self): # "sql" is an allowed option but has no effect
sql = 'select * from foo;'
f = lambda sql: sqlparse.format(sql, output_format='sql')
self.ndiffAssertEqual(f(sql), 'select * from foo;')
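# Standalone illustration of the API exercised above; assumes sqlparse is
# importable, as it already is for these tests.
if __name__ == '__main__':
    print(sqlparse.format('select * from foo where bar = 1',
                          keyword_case='upper', reindent=True))
    # Expected output (keywords uppercased, clauses reindented):
    # SELECT *
    # FROM foo
    # WHERE bar = 1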
|
mpl-2.0
|
SummerLW/Perf-Insight-Report
|
dashboard/dashboard/task_runner.py
|
1
|
3164
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to run long running tasks.
This allows a task to run in the Task Queue, which gives about 10 minutes of
execution time.
Usage:
In https://chromeperf.appspot.com/_ah/stats/shell, pass a function to
task_runner.Run. Task function should be picklable and must include any
required imports within the function's body.
Example:
from dashboard import task_runner
def unique_test_suite_names():
from dashboard.models import graph_data
query = graph_data.Test.query(graph_data.Test.parent_test == None)
test_keys = query.fetch(limit=50000, keys_only=True)
return sorted(set(k.string_id() for k in test_keys))
task_runner.Run(unique_test_suite_names)
The task function's return value and stdout will be displayed at:
https://chromeperf.appspot.com/get_logs?namespace=task_runner&name=report
WARNING:
  Running code in Appstats does affect the live dashboard, so watch out for
  any datastore writes that may corrupt or unintentionally delete data.
"""
import datetime
import marshal
import cStringIO
import sys
import time
import types
from google.appengine.ext import deferred
from dashboard import quick_logger
_TASK_QUEUE_NAME = 'task-runner-queue'
_REPORT_TEMPLATE = """%(function_name)s: %(start_time)s
Stdout:
%(stdout)s
Elapsed: %(elapsed_time)f seconds.
Returned results:
%(returned_results)s
"""
def Run(task_function):
"""Runs task in task queue."""
  # Since defer uses pickle and pickle can't serialize a non-global function,
  # we'll use marshal to serialize and deserialize the function's code object
  # before and after the defer call.
code_string = marshal.dumps(task_function.func_code)
deferred.defer(_TaskWrapper, code_string, task_function.__name__,
_queue=_TASK_QUEUE_NAME)
def _TaskWrapper(code_string, function_name):
"""Runs the task and captures the stdout and the returned results."""
formatted_start_time = datetime.datetime.now().strftime(
'%Y-%m-%d %H:%M:%S %Z')
_AddReportToLog('Starting task "%s" at %s.' %
(function_name, formatted_start_time))
code = marshal.loads(code_string)
task_function = types.FunctionType(code, globals(), 'TaskFunction')
stdout_original = sys.stdout
sys.stdout = stream = cStringIO.StringIO()
start_time = time.time()
try:
returned_results = task_function()
except Exception as e: # Intentionally broad -- pylint: disable=broad-except
print str(e)
returned_results = ''
elapsed_time = time.time() - start_time
stdout = stream.getvalue()
sys.stdout = stdout_original
results = {
'function_name': function_name,
'start_time': formatted_start_time,
'stdout': stdout,
'returned_results': returned_results,
'elapsed_time': elapsed_time
}
_AddReportToLog(_REPORT_TEMPLATE % results)
def _AddReportToLog(report):
"""Adds a log for bench results."""
formatter = quick_logger.Formatter()
logger = quick_logger.QuickLogger('task_runner', 'report', formatter)
logger.Log(report)
logger.Save()
|
bsd-3-clause
|
SurfasJones/djcmsrc3
|
venv/lib/python2.7/site-packages/pip/_vendor/colorama/ansi.py
|
171
|
1089
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
def code_to_chars(code):
return CSI + str(code) + 'm'
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes(AnsiFore)
Back = AnsiCodes(AnsiBack)
Style = AnsiCodes(AnsiStyle)
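# Minimal demonstration, assuming a terminal that interprets ANSI escapes:
if __name__ == '__main__':
    print(Fore.RED + 'red text' + Fore.RESET)
    print(Back.GREEN + Style.BRIGHT + 'bright on green' + Style.RESET_ALL)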
|
mit
|
ktritz/vispy
|
examples/basics/visuals/line_draw.py
|
18
|
6723
|
# -*- coding: utf-8 -*-
# vispy: testskip (KNOWNFAIL)
# Copyright (c) 2015, Felix Schill.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Simple demonstration of mouse drawing and editing of a line plot.
This demo extends the Line visual from scene, adding mouse events that allow
modification and creation of line points with the mouse.
Vispy takes care of coordinate transforms from screen to ViewBox - the
demo works on different zoom levels.
"""
import numpy as np
from vispy import app, scene
class EditLineVisual(scene.visuals.Line):
"""
Mouse editing extension to the Line visual.
This class adds mouse picking for line points, mouse_move handling for
dragging existing points, and
adding new points when clicking into empty space.
"""
def __init__(self, *args, **kwargs):
scene.visuals.Line.__init__(self, *args, **kwargs)
# initialize point markers
self.markers = scene.visuals.Markers()
self.marker_colors = np.ones((len(self.pos), 4), dtype=np.float32)
self.markers.set_data(pos=self.pos, symbol="s", edge_color="red",
size=6)
self.selected_point = None
self.selected_index = -1
# snap grid size
self.gridsize = 10
def draw(self, transforms):
# draw line and markers
scene.visuals.Line.draw(self, transforms)
self.markers.draw(transforms)
def print_mouse_event(self, event, what):
""" print mouse events for debugging purposes """
print('%s - pos: %r, button: %s, delta: %r' %
(what, event.pos, event.button, event.delta))
def select_point(self, event, radius=5):
"""
Get line point close to mouse pointer and its index
Parameters
----------
event : the mouse event being processed
radius : scalar
max. distance in pixels between mouse and line point to be accepted
return: (numpy.array, int)
picked point and index of the point in the pos array
"""
# position in scene/document coordinates
pos_scene = event.pos[:3]
# project mouse radius from screen coordinates to document coordinates
mouse_radius = \
(event.visual_to_canvas.imap(np.array([radius, radius, radius])) -
event.visual_to_canvas.imap(np.array([0, 0, 0])))[0]
# print("Mouse radius in document units: ", mouse_radius)
# find first point within mouse_radius
index = 0
for p in self.pos:
if np.linalg.norm(pos_scene - p) < mouse_radius:
# print p, index
# point found, return point and its index
return p, index
index += 1
# no point found, return None
return None, -1
def update_markers(self, selected_index=-1, highlight_color=(1, 0, 0, 1)):
""" update marker colors, and highlight a marker with a given color """
self.marker_colors.fill(1)
# default shape (non-highlighted)
shape = "o"
size = 6
if 0 <= selected_index < len(self.marker_colors):
self.marker_colors[selected_index] = highlight_color
# if there is a highlighted marker,
# change all marker shapes to a square
shape = "s"
size = 8
self.markers.set_data(pos=self.pos, symbol=shape, edge_color='red',
size=size, face_color=self.marker_colors)
def on_mouse_press(self, event):
self.print_mouse_event(event, 'Mouse press')
pos_scene = event.pos[:3]
# find closest point to mouse and select it
self.selected_point, self.selected_index = self.select_point(event)
# if no point was clicked add a new one
if self.selected_point is None:
print("adding point", len(self.pos))
self._pos = np.append(self.pos, [pos_scene], axis=0)
self.set_data(pos=self.pos)
self.marker_colors = np.ones((len(self.pos), 4), dtype=np.float32)
self.selected_point = self.pos[-1]
self.selected_index = len(self.pos) - 1
# update markers and highlights
self.update_markers(self.selected_index)
def on_mouse_release(self, event):
self.print_mouse_event(event, 'Mouse release')
self.selected_point = None
self.update_markers()
def on_mouse_move(self, event):
# left mouse button
if event.button == 1:
# self.print_mouse_event(event, 'Mouse drag')
if self.selected_point is not None:
pos_scene = event.pos
# update selected point to new position given by mouse
self.selected_point[0] = round(pos_scene[0] / self.gridsize) \
* self.gridsize
self.selected_point[1] = round(pos_scene[1] / self.gridsize) \
* self.gridsize
self.set_data(pos=self.pos)
self.update_markers(self.selected_index)
else:
# if no button is pressed, just highlight the marker that would be
# selected on click
hl_point, hl_index = self.select_point(event)
self.update_markers(hl_index, highlight_color=(0.5, 0.5, 1.0, 1.0))
self.update()
class Canvas(scene.SceneCanvas):
""" A simple test canvas for testing the EditLineVisual """
def __init__(self):
scene.SceneCanvas.__init__(self, keys='interactive',
size=(800, 800))
# Create some initial points
n = 7
self.pos = np.zeros((n, 3), dtype=np.float32)
self.pos[:, 0] = np.linspace(-50, 50, n)
self.pos[:, 1] = np.random.normal(size=n, scale=10, loc=0)
# create new editable line
self.line = EditLineVisual(pos=self.pos, color='w', width=3,
antialias=True, method='gl')
self.view = self.central_widget.add_view()
self.view.camera = scene.PanZoomCamera(rect=(-100, -100, 200, 200),
aspect=1.0)
# the left mouse button pan has to be disabled in the camera, as it
# interferes with dragging line points
# Proposed change in camera: make mouse buttons configurable
self.view.camera._viewbox.events.mouse_move.disconnect(
self.view.camera.viewbox_mouse_event)
self.view.add(self.line)
self.show()
self.selected_point = None
scene.visuals.GridLines(parent=self.view.scene)
if __name__ == '__main__':
win = Canvas()
app.run()
|
bsd-3-clause
|
pombredanne/erpnext
|
erpnext/accounts/doctype/period_closing_voucher/test_period_closing_voucher.py
|
12
|
1929
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.utils import flt
from erpnext.accounts.doctype.journal_entry.test_journal_entry import make_journal_entry
class TestPeriodClosingVoucher(unittest.TestCase):
def test_closing_entry(self):
make_journal_entry("_Test Bank - _TC", "Sales - _TC", 400,
"_Test Cost Center - _TC", submit=True)
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Bank - _TC", 600, "_Test Cost Center - _TC", submit=True)
profit_or_loss = frappe.db.sql("""select sum(ifnull(t1.debit,0))-sum(ifnull(t1.credit,0)) as balance
from `tabGL Entry` t1, `tabAccount` t2
where t1.account = t2.name and ifnull(t2.report_type, '') = 'Profit and Loss'
and t2.docstatus < 2 and t2.company = '_Test Company'
and t1.posting_date between '2013-01-01' and '2013-12-31'""")
profit_or_loss = flt(profit_or_loss[0][0]) if profit_or_loss else 0
pcv = self.make_period_closing_voucher()
gle_value = frappe.db.sql("""select ifnull(debit, 0) - ifnull(credit, 0)
from `tabGL Entry` where voucher_type='Period Closing Voucher' and voucher_no=%s
and account = '_Test Account Reserves and Surplus - _TC'""", pcv.name)
gle_value = flt(gle_value[0][0]) if gle_value else 0
self.assertEqual(gle_value, profit_or_loss)
def make_period_closing_voucher(self):
pcv = frappe.get_doc({
"doctype": "Period Closing Voucher",
"closing_account_head": "_Test Account Reserves and Surplus - _TC",
"company": "_Test Company",
"fiscal_year": "_Test Fiscal Year 2013",
"posting_date": "2013-12-31",
"remarks": "test"
})
pcv.insert()
pcv.submit()
return pcv
test_dependencies = ["Customer", "Cost Center"]
test_records = frappe.get_test_records("Period Closing Voucher")
|
agpl-3.0
|
thockin/contrib
|
flannel-server/Godeps/_workspace/src/github.com/ugorji/go/codec/test.py
|
1138
|
3876
|
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A test calls this internally to create the golden files
# so it can process them (and we don't have to check in the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
u"someday",
u"",
u"bytestring",
1328176922000002000,
-2206187877999998000,
270,
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
|
apache-2.0
|
linz/QGIS-AIMS-Plugin
|
AIMSDataManager/Feature.py
|
2
|
8047
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright 2015 Crown copyright (c)
# Land Information New Zealand and the New Zealand Government.
# All rights reserved
#
# This program is released under the terms of the 3 clause BSD license. See the
# LICENSE file for more information.
#
################################################################################
#http://devassgeo01:8080/aims/api/address/features - properties
import hashlib
import re
from AimsUtility import FeatureType,ActionType,ApprovalType,FeedType
from AimsLogging import Logger
aimslog = None
# _ref is a time variable, address positions are nested and covered by changeId,
# and meta contains non-object attrs
HASH_EXCLUDES = ('_ref', '_address_positions','meta')
class Feature(object):
'''Feature data object representing AIMS primary objects Addresses, Groups and Users'''
type = FeedType.FEATURES
global aimslog
aimslog = Logger.setup()
def __init__(self, ref=None):
#aimslog.info('AdrRef.{}'.format(ref))
self._ref = ref
#self._hash = self._hash()#no point, empty
#generic validators
@staticmethod
def _vString(sval): return isinstance(sval,str) #alpha only filter?
@staticmethod
def _vInt(ival): return isinstance(ival, int) #range filter?
@staticmethod
def _vDate(date): return Feature._vString(date) and bool(re.match('^\d{4}-\d{2}-\d{2}$',date))
@staticmethod
def _vEmail(email): return Feature._vString(email) and bool(re.match('^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$',email))
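    # e.g. Feature._vDate('2015-03-01') -> True, Feature._vDate('2015-3-1') -> False;
    # Feature._vEmail('user@example.com') -> True (a basic format check only)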
#COMMON---------------------------------------------
    #version is not used on non-feed feature types but its inclusion here won't matter
def setVersion (self, version):
self._version = version if Feature._vInt(version) else None
def getVersion(self):
return self._version
def setSourceUser (self, sourceUser):
self._workflow_sourceUser = sourceUser
def getSourceUser (self):
return self._workflow_sourceUser
def setSourceOrganisation (self, sourceOrganisation):
self._workflow_sourceOrganisation = sourceOrganisation
def getSourceOrganisation (self):
return self._workflow_sourceOrganisation
def setChangeType(self, changeType):
self._changeType = changeType
def getChangeType(self):
return self._changeType
def setQueueStatus(self, queueStatus):
self._queueStatus = queueStatus
def getQueueStatus(self):
return self._queueStatus
#---------------------------------------------------
def _setEntities(self,entities):
self.setMeta()
self.meta.entities = entities
def _getEntities(self):
return self.meta.entities
def setMeta(self, meta = None):
if not hasattr(self,'meta'): self.meta = meta if meta else FeatureMetaData()
def getMeta(self):
return self.meta if hasattr(self, 'meta') else None
# def compare(self,other):
# '''Feature equality comparator using simple attribute comparison
# @param other: Another Feature object whose attributes will be compared to selfs
# @return: Boolean
# '''
# #return False if isinstance(self,other) else hash(self)==hash(other)
# #IMPORTANT. Attribute value compare only useful with distinct (deepcopy'd) instances
# return all((getattr(self,a)==getattr(other,a) for a in self.__dict__.keys()))
def merge(self,other,exclude=''):
        '''Merges new (other) attributes into the existing (self) object
        @param other: Another Feature object whose attributes will be added to self's attributes
@return: Feature
'''
for key in other.__dict__.keys():
if key not in exclude.split(','): setattr(self,key, getattr(other,key))
return self
#---------------------------------
def setRequestId(self,requestId):
        '''Set meta requestId variable on Feature object
        @param requestId: User-generated variable attached to and identifying individual AIMS requests. Integer type restricted at the meta setter
@type requestId: Integer
'''
self.setMeta()
self.meta.requestId = requestId
def getRequestId(self):
return self.meta.requestId if hasattr(self,'meta') else None
def setErrors(self,errors):
        '''Set meta errors variable on Feature object
        @param errors: Error string, typically set from the HTTP error message returned at the API interface
        @type errors: String
'''
self.setMeta()
self.meta.errors = errors
def getErrors(self):
return self.meta.errors if hasattr(self,'meta') else None
#object hash of attributes for page comparison
def getHash(self):
'''Generates unique hash values for Feature objects.
Hashes are calculated by reading all attributes excluding the ref, meta and position attributes.
Numeric values are converted to string and unicode values are encoded
        The resulting string attributes are concatenated via reduce and their md5 is calculated.
The hexdigest of this hash is returned
@return: 32 digit hexdigest representing hash code
'''
        #discard all list/nested attributes? This should be okay since we capture the version address|changeId in the top level
s0 = [getattr(self,z) for z in self.__dict__.keys() if z not in HASH_EXCLUDES]
s1 = [str(z) for z in s0 if isinstance(z,(int,float,long,complex))]
s2 = [z.encode('utf8') for z in s0 if isinstance(z,(basestring)) and z not in s1]
#return reduce(lambda x,y: x.update(y), s1+s2,hashlib.md5()) #reduce wont recognise haslib objs
self.setMeta()
self.meta.hash = hashlib.md5(reduce(lambda x,y: x+y, s1+s2)).hexdigest()
return self.meta.hash
@staticmethod
def clone(a,b=None):
'''Clones attributes of A to B and instantiates B (as type A) if not provided
        @param a: Feature object to be cloned
@type a: Feature
@param b: Feature object being overwritten (optional)
@type b: Feature
        @return: Manual deepcopy of the Feature object
'''
#duplicates only attributes set in source object
from FeatureFactory import FeatureFactory
if not b: b = FeatureFactory.getInstance(a.type).get()
for attr in a.__dict__.keys(): setattr(b,attr,getattr(a,attr))
return b
@staticmethod
def compare(a,b):
        '''Compares the supplied features with each other using computed hash values
@param a: One of the features being compared
@type a: Feature
@param b: The other feature being compared
@type b: Feature
'''
#TODO implement an ordering criteria for sorting
return a.getHash()==b.getHash()
class FeatureMetaData(object):
'''Embedded container for address meta information and derived attributes eg warnings, errors and tracking'''
def __init__(self):
        '''Initialise metadata container with all null equivalents'''
self._requestId,self._statusMessage,self._errors,self._entities, self._hash = 0,'',[],[],None
@property
def requestId(self): return self._requestId
@requestId.setter
def requestId(self, requestId): self._requestId = requestId if Feature._vInt(requestId) else None
@property
def entities(self): return self._entities
@entities.setter
def entities(self,entities): self._entities = entities
@property
def errors(self): return self._errors
@errors.setter
def errors(self,errors): self._errors = errors
@property
def hash(self): return self._hash
@hash.setter
def hash(self,hash): self._hash = hash
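# Hedged sketch of the intended equality workflow (Python 2, matching this
# module's use of long/basestring; the values below are illustrative):
#
#   a = Feature(ref='2015-01-01T00:00:00')
#   a.setVersion(1)
#   b = Feature.clone(a)          # manual deepcopy; _ref is excluded from hashing
#   assert Feature.compare(a, b)  # equal md5 over the non-excluded attributes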
|
bsd-3-clause
|
carvsdriver/msm8660-common_marla
|
tools/perf/scripts/python/failed-syscalls-by-pid.py
|
11180
|
2058
|
# failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
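# autodict (from Util) auto-vivifies intermediate keys, so the nested
# syscalls[comm][pid][id][ret] lookup below never needs explicit setup; the
# first increment of a leaf raises TypeError, which is caught to seed the
# count at 1.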
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
|
gpl-2.0
|
houzhenggang/openwrt-981213
|
scripts/dl_cleanup.py
|
223
|
6094
|
#!/usr/bin/env python3
"""
# OpenWrt download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (C) 2010-2015 Michael Buesch <[email protected]>
# Copyright (C) 2013-2015 OpenWrt.org
"""
from __future__ import print_function
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False
def parseVer_1234(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
(int(match.group(5)) << 16)
return (progname, progversion)
def parseVer_123(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(5)
except IndexError as e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
patchlevel
return (progname, progversion)
def parseVer_12(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(4)
except IndexError as e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
patchlevel
return (progname, progversion)
def parseVer_r(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64)
return (progname, progversion)
def parseVer_ymd(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32)
return (progname, progversion)
def parseVer_GIT(match, filepath):
progname = match.group(1)
st = os.stat(filepath)
progversion = int(st.st_mtime) << 64
return (progname, progversion)
extensions = (
".tar.gz",
".tar.bz2",
".tar.xz",
".orig.tar.gz",
".orig.tar.bz2",
".orig.tar.xz",
".zip",
".tgz",
".tbz",
".txz",
)
versionRegex = (
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234), # xxx-1.2.3.4
(re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd), # xxx-YYYY-MM-DD
(re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT), # xxx-GIT_SHASUM
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123), # xxx-1.2.3a
(re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123), # xxx-1_2_3
(re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12), # xxx-1.2a
(re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r), # xxx-r1111
)
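# The packed integers above preserve release ordering: e.g. under
# parseVer_123, "foo-1.2.10" packs 10 << 32 while "foo-1.2.3" packs 3 << 32,
# so Entry.__ge__ ranks 1.2.10 as the newer tarball.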
blacklist = [
("linux", re.compile(r"linux-\d.*")),
("gcc", re.compile(r"gcc-.*")),
("wl_apsta", re.compile(r"wl_apsta.*")),
(".fw", re.compile(r".*\.fw")),
(".arm", re.compile(r".*\.arm")),
(".bin", re.compile(r".*\.bin")),
("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
class EntryParseError(Exception): pass
class Entry:
def __init__(self, directory, filename):
self.directory = directory
self.filename = filename
self.progname = ""
self.fileext = ""
for ext in extensions:
if filename.endswith(ext):
filename = filename[0:0-len(ext)]
self.fileext = ext
break
else:
print(self.filename, "has an unknown file-extension")
raise EntryParseError("ext")
for (regex, parseVersion) in versionRegex:
match = regex.match(filename)
if match:
(self.progname, self.version) = parseVersion(
match, directory + "/" + filename + self.fileext)
break
else:
print(self.filename, "has an unknown version pattern")
raise EntryParseError("ver")
def getPath(self):
return (self.directory + "/" + self.filename).replace("//", "/")
def deleteFile(self):
path = self.getPath()
print("Deleting", path)
if not opt_dryrun:
os.unlink(path)
def __ge__(self, y):
return self.version >= y.version
def usage():
print("OpenWrt download directory cleanup utility")
print("Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>")
print("")
print(" -d|--dry-run Do a dry-run. Don't delete any files")
print(" -B|--show-blacklist Show the blacklist and exit")
print(" -w|--whitelist ITEM Remove ITEM from blacklist")
def main(argv):
global opt_dryrun
try:
(opts, args) = getopt.getopt(argv[1:],
"hdBw:",
[ "help", "dry-run", "show-blacklist", "whitelist=", ])
if len(args) != 1:
usage()
return 1
except getopt.GetoptError as e:
usage()
return 1
directory = args[0]
for (o, v) in opts:
if o in ("-h", "--help"):
usage()
return 0
if o in ("-d", "--dry-run"):
opt_dryrun = True
if o in ("-w", "--whitelist"):
for i in range(0, len(blacklist)):
(name, regex) = blacklist[i]
if name == v:
del blacklist[i]
break
else:
print("Whitelist error: Item", v,\
"is not in blacklist")
return 1
if o in ("-B", "--show-blacklist"):
for (name, regex) in blacklist:
sep = "\t\t"
if len(name) >= 8:
sep = "\t"
print("%s%s(%s)" % (name, sep, regex.pattern))
return 0
# Create a directory listing and parse the file names.
entries = []
for filename in os.listdir(directory):
if filename == "." or filename == "..":
continue
for (name, regex) in blacklist:
if regex.match(filename):
if opt_dryrun:
print(filename, "is blacklisted")
break
else:
try:
entries.append(Entry(directory, filename))
except EntryParseError as e:
pass
# Create a map of programs
progmap = {}
for entry in entries:
if entry.progname in progmap.keys():
progmap[entry.progname].append(entry)
else:
progmap[entry.progname] = [entry,]
# Traverse the program map and delete everything but the last version
for prog in progmap:
lastVersion = None
versions = progmap[prog]
for version in versions:
if lastVersion is None or version >= lastVersion:
lastVersion = version
if lastVersion:
for version in versions:
if version is not lastVersion:
version.deleteFile()
if opt_dryrun:
print("Keeping", lastVersion.getPath())
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
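# Example invocations (hypothetical paths, shown for illustration only):
#   scripts/dl_cleanup.py --dry-run dl/
#   scripts/dl_cleanup.py --whitelist gcc dl/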
|
gpl-2.0
|
xwolf12/django
|
tests/flatpages_tests/test_forms.py
|
165
|
4569
|
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.utils import translation
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.flatpages', ]})
@override_settings(SITE_ID=1)
class FlatpageAdminFormTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
def setUp(self):
# Site fields cache needs to be cleared after flatpages is added to
# INSTALLED_APPS
Site._meta._expire_cache()
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [settings.SITE_ID],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
def test_flatpage_requires_leading_slash(self):
form = FlatpageForm(data=dict(url='no_leading_slash/', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a leading slash."])
@override_settings(APPEND_SLASH=True,
MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'])
def test_flatpage_requires_trailing_slash_with_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a trailing slash."])
@override_settings(APPEND_SLASH=False,
MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'])
def test_flatpage_doesnt_requires_trailing_slash_without_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
self.assertTrue(form.is_valid())
def test_flatpage_admin_form_url_uniqueness_validation(self):
"The flatpage admin form correctly enforces url uniqueness among flatpages of the same site"
data = dict(url='/myflatpage1/', **self.form_data)
FlatpageForm(data=data).save()
f = FlatpageForm(data=data)
with translation.override('en'):
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'__all__': ['Flatpage with url /myflatpage1/ already exists for site example.com']})
def test_flatpage_admin_form_edit(self):
"""
Existing flatpages can be edited in the admin form without triggering
the url-uniqueness validation.
"""
existing = FlatPage.objects.create(
url="/myflatpage1/", title="Some page", content="The content")
existing.sites.add(settings.SITE_ID)
data = dict(url='/myflatpage1/', **self.form_data)
f = FlatpageForm(data=data, instance=existing)
self.assertTrue(f.is_valid(), f.errors)
updated = f.save()
self.assertEqual(updated.title, "A test page")
def test_flatpage_nosites(self):
data = dict(url='/myflatpage1/', **self.form_data)
data.update({'sites': ''})
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'sites': [translation.ugettext('This field is required.')]})
|
bsd-3-clause
|
dimkal/mne-python
|
mne/forward/_lead_dots.py
|
7
|
19514
|
# Authors: Eric Larson <[email protected]>
# Mainak Jas <[email protected]>
# Matti Hamalainen <[email protected]>
#
# License: BSD (3-clause)
import os
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from ..parallel import parallel_func
from ..utils import logger, _get_extra_data_path
##############################################################################
# FAST LEGENDRE (DERIVATIVE) POLYNOMIALS USING LOOKUP TABLE
def _next_legen_der(n, x, p0, p01, p0d, p0dd):
"""Compute the next Legendre polynomial and its derivatives"""
# only good for n > 1 !
help_ = p0
helpd = p0d
p0 = ((2 * n - 1) * x * help_ - (n - 1) * p01) / n
p0d = n * help_ + x * helpd
p0dd = (n + 1) * helpd + x * p0dd
p01 = help_
return p0, p0d, p0dd
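# For reference, the recurrence implemented above (valid for n > 1) is:
#   P_n(x)   = ((2n - 1) x P_{n-1}(x) - (n - 1) P_{n-2}(x)) / n
#   P_n'(x)  = n P_{n-1}(x) + x P_{n-1}'(x)
#   P_n''(x) = (n + 1) P_{n-1}'(x) + x P_{n-1}''(x)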
def _get_legen(x, n_coeff=100):
"""Get Legendre polynomials expanded about x"""
return legendre.legvander(x, n_coeff - 1)
def _get_legen_der(xx, n_coeff=100):
"""Get Legendre polynomial derivatives expanded about x"""
coeffs = np.empty((len(xx), n_coeff, 3))
for c, x in zip(coeffs, xx):
p0s, p0ds, p0dds = c[:, 0], c[:, 1], c[:, 2]
p0s[:2] = [1.0, x]
p0ds[:2] = [0.0, 1.0]
p0dds[:2] = [0.0, 0.0]
for n in range(2, n_coeff):
p0s[n], p0ds[n], p0dds[n] = _next_legen_der(
n, x, p0s[n - 1], p0s[n - 2], p0ds[n - 1], p0dds[n - 1])
return coeffs
def _get_legen_table(ch_type, volume_integral=False, n_coeff=100,
n_interp=20000, force_calc=False):
"""Return a (generated) LUT of Legendre (derivative) polynomial coeffs"""
if n_interp % 2 != 0:
raise RuntimeError('n_interp must be even')
fname = op.join(_get_extra_data_path(), 'tables')
if not op.isdir(fname):
        # Updated due to API change (GH 1167)
os.makedirs(fname)
if ch_type == 'meg':
fname = op.join(fname, 'legder_%s_%s.bin' % (n_coeff, n_interp))
leg_fun = _get_legen_der
extra_str = ' derivative'
lut_shape = (n_interp + 1, n_coeff, 3)
else: # 'eeg'
fname = op.join(fname, 'legval_%s_%s.bin' % (n_coeff, n_interp))
leg_fun = _get_legen
extra_str = ''
lut_shape = (n_interp + 1, n_coeff)
if not op.isfile(fname) or force_calc:
n_out = (n_interp // 2)
logger.info('Generating Legendre%s table...' % extra_str)
x_interp = np.arange(-n_out, n_out + 1, dtype=np.float64) / n_out
lut = leg_fun(x_interp, n_coeff).astype(np.float32)
if not force_calc:
with open(fname, 'wb') as fid:
fid.write(lut.tostring())
else:
logger.info('Reading Legendre%s table...' % extra_str)
with open(fname, 'rb', buffering=0) as fid:
lut = np.fromfile(fid, np.float32)
lut.shape = lut_shape
# we need this for the integration step
n_fact = np.arange(1, n_coeff, dtype=float)
if ch_type == 'meg':
n_facts = list() # multn, then mult, then multn * (n + 1)
if volume_integral:
n_facts.append(n_fact / ((2.0 * n_fact + 1.0) *
(2.0 * n_fact + 3.0)))
else:
n_facts.append(n_fact / (2.0 * n_fact + 1.0))
n_facts.append(n_facts[0] / (n_fact + 1.0))
n_facts.append(n_facts[0] * (n_fact + 1.0))
# skip the first set of coefficients because they are not used
lut = lut[:, 1:, [0, 1, 1, 2]] # for multiplicative convenience later
# reshape this for convenience, too
n_facts = np.array(n_facts)[[2, 0, 1, 1], :].T
n_facts = np.ascontiguousarray(n_facts)
n_fact = n_facts
else: # 'eeg'
n_fact = (2.0 * n_fact + 1.0) * (2.0 * n_fact + 1.0) / n_fact
# skip the first set of coefficients because they are not used
lut = lut[:, 1:].copy()
return lut, n_fact
def _get_legen_lut_fast(x, lut):
"""Return Legendre coefficients for given x values in -1<=x<=1"""
# map into table vals (works for both vals and deriv tables)
n_interp = (lut.shape[0] - 1.0)
# equiv to "(x + 1.0) / 2.0) * n_interp" but faster
mm = x * (n_interp / 2.0) + 0.5 * n_interp
# nearest-neighbor version (could be decent enough...)
idx = np.round(mm).astype(int)
vals = lut[idx]
return vals
def _get_legen_lut_accurate(x, lut):
"""Return Legendre coefficients for given x values in -1<=x<=1"""
# map into table vals (works for both vals and deriv tables)
n_interp = (lut.shape[0] - 1.0)
# equiv to "(x + 1.0) / 2.0) * n_interp" but faster
mm = x * (n_interp / 2.0) + 0.5 * n_interp
# slower, more accurate interpolation version
mm = np.minimum(mm, n_interp - 0.0000000001)
idx = np.floor(mm).astype(int)
w2 = mm - idx
w2.shape += tuple([1] * (lut.ndim - w2.ndim)) # expand to correct size
vals = (1 - w2) * lut[idx] + w2 * lut[idx + 1]
return vals
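# Minimal usage sketch (illustrative, not part of the module): for the EEG
# table the two lookups agree up to interpolation error, e.g.
#
#   lut, n_fact = _get_legen_table('eeg')
#   fast = _get_legen_lut_fast(np.array([0.0, 0.5]), lut)
#   accurate = _get_legen_lut_accurate(np.array([0.0, 0.5]), lut)
#   # both have shape (2, 99): one row per x, one column per coefficient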
def _comp_sum_eeg(beta, ctheta, lut_fun, n_fact):
"""Lead field dot products using Legendre polynomial (P_n) series"""
# Compute the sum occurring in the evaluation.
# The result is
# sums[:] (2n+1)^2/n beta^n P_n
coeffs = lut_fun(ctheta)
betans = np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
axis=1)
s0 = np.dot(coeffs * betans, n_fact) # == weighted sum across cols
return s0
def _comp_sums_meg(beta, ctheta, lut_fun, n_fact, volume_integral):
"""Lead field dot products using Legendre polynomial (P_n) series.
Parameters
----------
beta : array, shape (n_points * n_points, 1)
Coefficients of the integration.
ctheta : array, shape (n_points * n_points, 1)
Cosine of the angle between the sensor integration points.
lut_fun : callable
Look-up table for evaluating Legendre polynomials.
n_fact : array
Coefficients in the integration sum.
volume_integral : bool
If True, compute volume integral.
Returns
-------
sums : array, shape (4, n_points * n_points)
The results.
"""
# Compute the sums occurring in the evaluation.
# Two point magnetometers on the xz plane are assumed.
# The four sums are:
# * sums[:, 0] n(n+1)/(2n+1) beta^(n+1) P_n
# * sums[:, 1] n/(2n+1) beta^(n+1) P_n'
# * sums[:, 2] n/((2n+1)(n+1)) beta^(n+1) P_n'
# * sums[:, 3] n/((2n+1)(n+1)) beta^(n+1) P_n''
coeffs = lut_fun(ctheta)
beta = (np.cumprod(np.tile(beta[:, np.newaxis], (1, n_fact.shape[0])),
axis=1) * beta[:, np.newaxis])
# This is equivalent, but slower:
# sums = np.sum(beta[:, :, np.newaxis] * n_fact * coeffs, axis=1)
# sums = np.rollaxis(sums, 2)
sums = np.einsum('ij,jk,ijk->ki', beta, n_fact, coeffs)
return sums
###############################################################################
# SPHERE DOTS
def _fast_sphere_dot_r0(r, rr1, rr2, lr1, lr2, cosmags1, cosmags2,
w1, w2, volume_integral, lut, n_fact, ch_type):
"""Lead field dot product computation for M/EEG in the sphere model.
Parameters
----------
r : float
The integration radius. It is used to calculate beta as:
beta = (r * r) / (lr1 * lr2).
rr1 : array, shape (n_points x 3)
Normalized position vectors of integrations points in first sensor.
rr2 : array, shape (n_points x 3)
Normalized position vector of integration points in second sensor.
lr1 : array, shape (n_points x 1)
Magnitude of position vector of integration points in first sensor.
lr2 : array, shape (n_points x 1)
Magnitude of position vector of integration points in second sensor.
cosmags1 : array, shape (n_points x 1)
Direction of integration points in first sensor.
cosmags2 : array, shape (n_points x 1)
Direction of integration points in second sensor.
w1 : array, shape (n_points x 1)
Weights of integration points in the first sensor.
w2 : array, shape (n_points x 1)
Weights of integration points in the second sensor.
volume_integral : bool
If True, compute volume integral.
lut : callable
Look-up table for evaluating Legendre polynomials.
n_fact : array
Coefficients in the integration sum.
ch_type : str
The channel type. It can be 'meg' or 'eeg'.
Returns
-------
result : float
The integration sum.
"""
ct = np.einsum('ik,jk->ij', rr1, rr2) # outer product, sum over coords
# expand axes
rr1 = rr1[:, np.newaxis, :] # (n_rr1, n_rr2, n_coord) e.g. 4x4x3
rr2 = rr2[np.newaxis, :, :]
lr1lr2 = lr1[:, np.newaxis] * lr2[np.newaxis, :]
beta = (r * r) / lr1lr2
if ch_type == 'meg':
sums = _comp_sums_meg(beta.flatten(), ct.flatten(), lut, n_fact,
volume_integral)
sums.shape = (4,) + beta.shape
# Accumulate the result, a little bit streamlined version
# cosmags1 = cosmags1[:, np.newaxis, :]
# cosmags2 = cosmags2[np.newaxis, :, :]
# n1c1 = np.sum(cosmags1 * rr1, axis=2)
# n1c2 = np.sum(cosmags1 * rr2, axis=2)
# n2c1 = np.sum(cosmags2 * rr1, axis=2)
# n2c2 = np.sum(cosmags2 * rr2, axis=2)
# n1n2 = np.sum(cosmags1 * cosmags2, axis=2)
n1c1 = np.einsum('ik,ijk->ij', cosmags1, rr1)
n1c2 = np.einsum('ik,ijk->ij', cosmags1, rr2)
n2c1 = np.einsum('jk,ijk->ij', cosmags2, rr1)
n2c2 = np.einsum('jk,ijk->ij', cosmags2, rr2)
n1n2 = np.einsum('ik,jk->ij', cosmags1, cosmags2)
part1 = ct * n1c1 * n2c2
part2 = n1c1 * n2c1 + n1c2 * n2c2
result = (n1c1 * n2c2 * sums[0] +
(2.0 * part1 - part2) * sums[1] +
(n1n2 + part1 - part2) * sums[2] +
(n1c2 - ct * n1c1) * (n2c1 - ct * n2c2) * sums[3])
# Give it a finishing touch!
const = 4e-14 * np.pi # This is \mu_0^2/4\pi
result *= (const / lr1lr2)
if volume_integral:
result *= r
else: # 'eeg'
sums = _comp_sum_eeg(beta.flatten(), ct.flatten(), lut, n_fact)
sums.shape = beta.shape
# Give it a finishing touch!
eeg_const = 1.0 / (4.0 * np.pi)
result = eeg_const * sums / lr1lr2
    # now we add them all up with weights
if w1 is None: # operating on surface, treat independently
# result = np.sum(w2[np.newaxis, :] * result, axis=1)
result = np.dot(result, w2)
else:
# result = np.sum((w1[:, np.newaxis] * w2[np.newaxis, :]) * result)
result = np.einsum('i,j,ij', w1, w2, result)
return result
def _do_self_dots(intrad, volume, coils, r0, ch_type, lut, n_fact, n_jobs):
"""Perform the lead field dot product integrations.
Parameters
----------
intrad : float
The integration radius. It is used to calculate beta as:
beta = (intrad * intrad) / (r1 * r2).
volume : bool
If True, perform volume integral.
coils : list of dict
The coils.
r0 : array, shape (3 x 1)
The origin of the sphere.
ch_type : str
The channel type. It can be 'meg' or 'eeg'.
lut : callable
Look-up table for evaluating Legendre polynomials.
n_fact : array
Coefficients in the integration sum.
n_jobs : int
Number of jobs to run in parallel.
Returns
-------
products : array, shape (n_coils, n_coils)
The integration products.
"""
if ch_type == 'eeg':
intrad *= 0.7
# convert to normalized distances from expansion center
rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
cosmags = [coil['cosmag'] for coil in coils]
ws = [coil['w'] for coil in coils]
parallel, p_fun, _ = parallel_func(_do_self_dots_subset, n_jobs)
prods = parallel(p_fun(intrad, rmags, rlens, cosmags,
ws, volume, lut, n_fact, ch_type, idx)
for idx in np.array_split(np.arange(len(rmags)), n_jobs))
products = np.sum(prods, axis=0)
return products
def _do_self_dots_subset(intrad, rmags, rlens, cosmags, ws, volume, lut,
n_fact, ch_type, idx):
"""Helper for parallelization"""
# all possible combinations of two magnetometers
products = np.zeros((len(rmags), len(rmags)))
for ci1 in idx:
for ci2 in range(0, ci1 + 1):
res = _fast_sphere_dot_r0(intrad, rmags[ci1], rmags[ci2],
rlens[ci1], rlens[ci2],
cosmags[ci1], cosmags[ci2],
ws[ci1], ws[ci2], volume, lut,
n_fact, ch_type)
products[ci1, ci2] = res
products[ci2, ci1] = res
return products
def _do_cross_dots(intrad, volume, coils1, coils2, r0, ch_type,
lut, n_fact):
"""Compute lead field dot product integrations between two coil sets.
The code is a direct translation of MNE-C code found in
`mne_map_data/lead_dots.c`.
Parameters
----------
intrad : float
The integration radius. It is used to calculate beta as:
beta = (intrad * intrad) / (r1 * r2).
volume : bool
If True, compute volume integral.
coils1 : list of dict
The original coils.
coils2 : list of dict
The coils to which data is being mapped.
r0 : array, shape (3 x 1).
The origin of the sphere.
ch_type : str
The channel type. It can be 'meg' or 'eeg'
lut : callable
Look-up table for evaluating Legendre polynomials.
n_fact : array
Coefficients in the integration sum.
Returns
-------
products : array, shape (n_coils, n_coils)
The integration products.
"""
rmags1 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils1]
rmags2 = [coil['rmag'] - r0[np.newaxis, :] for coil in coils2]
rlens1 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags1]
rlens2 = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags2]
rmags1 = [r / rl[:, np.newaxis] for r, rl in zip(rmags1, rlens1)]
rmags2 = [r / rl[:, np.newaxis] for r, rl in zip(rmags2, rlens2)]
ws1 = [coil['w'] for coil in coils1]
ws2 = [coil['w'] for coil in coils2]
cosmags1 = [coil['cosmag'] for coil in coils1]
cosmags2 = [coil['cosmag'] for coil in coils2]
products = np.zeros((len(rmags1), len(rmags2)))
for ci1 in range(len(coils1)):
for ci2 in range(len(coils2)):
res = _fast_sphere_dot_r0(intrad, rmags1[ci1], rmags2[ci2],
rlens1[ci1], rlens2[ci2], cosmags1[ci1],
cosmags2[ci2], ws1[ci1], ws2[ci2],
volume, lut, n_fact, ch_type)
products[ci1, ci2] = res
return products
def _do_surface_dots(intrad, volume, coils, surf, sel, r0, ch_type,
lut, n_fact, n_jobs):
"""Compute the map construction products
Parameters
----------
intrad : float
The integration radius. It is used to calculate beta as:
beta = (intrad * intrad) / (r1 * r2)
volume : bool
If True, compute a volume integral.
coils : list of dict
The coils.
surf : dict
The surface on which the field is interpolated.
sel : array
Indices of the surface vertices to select.
r0 : array, shape (3 x 1)
The origin of the sphere.
ch_type : str
The channel type. It can be 'meg' or 'eeg'.
lut : callable
Look-up table for Legendre polynomials.
n_fact : array
Coefficients in the integration sum.
n_jobs : int
Number of jobs to run in parallel.
Returns
-------
products : array, shape (n_coils, n_coils)
The integration products.
"""
# convert to normalized distances from expansion center
rmags = [coil['rmag'] - r0[np.newaxis, :] for coil in coils]
rlens = [np.sqrt(np.sum(r * r, axis=1)) for r in rmags]
rmags = [r / rl[:, np.newaxis] for r, rl in zip(rmags, rlens)]
cosmags = [coil['cosmag'] for coil in coils]
ws = [coil['w'] for coil in coils]
rref = None
refl = None
# virt_ref = False
if ch_type == 'eeg':
intrad *= 0.7
# The virtual ref code is untested and unused, so it is
# commented out for now
# if virt_ref:
# rref = virt_ref[np.newaxis, :] - r0[np.newaxis, :]
# refl = np.sqrt(np.sum(rref * rref, axis=1))
# rref /= refl[:, np.newaxis]
rsurf = surf['rr'][sel] - r0[np.newaxis, :]
lsurf = np.sqrt(np.sum(rsurf * rsurf, axis=1))
rsurf /= lsurf[:, np.newaxis]
this_nn = surf['nn'][sel]
# loop over the coils
parallel, p_fun, _ = parallel_func(_do_surface_dots_subset, n_jobs)
prods = parallel(p_fun(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
idx)
for idx in np.array_split(np.arange(len(rmags)), n_jobs))
products = np.sum(prods, axis=0)
return products
def _do_surface_dots_subset(intrad, rsurf, rmags, rref, refl, lsurf, rlens,
this_nn, cosmags, ws, volume, lut, n_fact, ch_type,
idx):
"""Helper for parallelization.
Parameters
----------
refl : array | None
If ch_type is 'eeg', the magnitude of position vector of the
virtual reference (never used).
lsurf : array
Magnitude of position vector of the surface points.
rlens : list of arrays of length n_coils
Magnitude of position vector.
this_nn : array, shape (n_vertices, 3)
Surface normals.
cosmags : list of array.
Direction of the integration points in the coils.
ws : list of array
Integration weights of the coils.
volume : bool
If True, compute volume integral.
lut : callable
Look-up table for evaluating Legendre polynomials.
n_fact : array
Coefficients in the integration sum.
ch_type : str
'meg' or 'eeg'
idx : array, shape (n_coils x 1)
Index of coil.
Returns
-------
products : array, shape (n_coils, n_coils)
The integration products.
"""
products = np.zeros((len(rsurf), len(rmags)))
for ci in idx:
res = _fast_sphere_dot_r0(intrad, rsurf, rmags[ci],
lsurf, rlens[ci],
this_nn, cosmags[ci],
None, ws[ci], volume, lut,
n_fact, ch_type)
if rref is not None:
raise NotImplementedError # we don't ever use this, isn't tested
# vres = _fast_sphere_dot_r0(intrad, rref, rmags[ci],
# refl, rlens[ci],
# this_nn, cosmags[ci],
# None, ws[ci], volume, lut,
# n_fact, ch_type)
# products[:, ci] = res - vres
else:
products[:, ci] = res
return products
|
bsd-3-clause
|
magic0704/neutron
|
neutron/tests/tempest/services/identity/v3/json/token_client.py
|
23
|
6596
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest_lib.common import rest_client
from tempest_lib import exceptions as lib_exc
from neutron.tests.tempest.common import service_client
from neutron.tests.tempest import exceptions
class V3TokenClientJSON(rest_client.RestClient):
def __init__(self, auth_url, disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
dscv = disable_ssl_certificate_validation
super(V3TokenClientJSON, self).__init__(
None, None, None, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests)
if not auth_url:
raise exceptions.InvalidConfiguration('you must specify a v3 uri '
'if using the v3 identity '
'api')
if 'auth/tokens' not in auth_url:
auth_url = auth_url.rstrip('/') + '/auth/tokens'
self.auth_url = auth_url
def auth(self, user_id=None, username=None, password=None, project_id=None,
project_name=None, user_domain_id=None, user_domain_name=None,
project_domain_id=None, project_domain_name=None, domain_id=None,
domain_name=None, token=None):
"""
:param user_id: user id
:param username: user name
:param user_domain_id: the user domain id
:param user_domain_name: the user domain name
:param project_domain_id: the project domain id
:param project_domain_name: the project domain name
:param domain_id: a domain id to scope to
:param domain_name: a domain name to scope to
:param project_id: a project id to scope to
:param project_name: a project name to scope to
:param token: a token to re-scope.
Accepts different combinations of credentials.
        Sample valid combinations:
- token
- token, project_name, project_domain_id
- user_id, password
- username, password, user_domain_id
- username, password, project_name, user_domain_id, project_domain_id
Validation is left to the server side.
"""
creds = {
'auth': {
'identity': {
'methods': [],
}
}
}
id_obj = creds['auth']['identity']
if token:
id_obj['methods'].append('token')
id_obj['token'] = {
'id': token
}
if (user_id or username) and password:
id_obj['methods'].append('password')
id_obj['password'] = {
'user': {
'password': password,
}
}
if user_id:
id_obj['password']['user']['id'] = user_id
else:
id_obj['password']['user']['name'] = username
_domain = None
if user_domain_id is not None:
_domain = dict(id=user_domain_id)
elif user_domain_name is not None:
_domain = dict(name=user_domain_name)
if _domain:
id_obj['password']['user']['domain'] = _domain
if (project_id or project_name):
_project = dict()
if project_id:
_project['id'] = project_id
elif project_name:
_project['name'] = project_name
if project_domain_id is not None:
_project['domain'] = {'id': project_domain_id}
elif project_domain_name is not None:
_project['domain'] = {'name': project_domain_name}
creds['auth']['scope'] = dict(project=_project)
elif domain_id:
creds['auth']['scope'] = dict(domain={'id': domain_id})
elif domain_name:
creds['auth']['scope'] = dict(domain={'name': domain_name})
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
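    # Usage sketch (hypothetical endpoint and credentials, illustrative only):
    #
    #   client = V3TokenClientJSON('http://keystone.example.com:5000/v3')
    #   body = client.auth(username='demo', password='secret',
    #                      project_name='demo', user_domain_id='default',
    #                      project_domain_id='default')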
def request(self, method, url, extra_headers=False, headers=None,
body=None):
"""A simple HTTP request interface."""
if headers is None:
            # Always accept 'json', even for the XML token client, because an
            # XML response is not easily converted to the corresponding JSON
            # one.
headers = self.get_headers(accept_type="json")
elif extra_headers:
try:
headers.update(self.get_headers(accept_type="json"))
except (ValueError, TypeError):
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise lib_exc.Unauthorized(resp_body['error']['message'])
elif resp.status not in [200, 201, 204]:
raise exceptions.IdentityError(
'Unexpected status code {0}'.format(resp.status))
return resp, json.loads(resp_body)
def get_token(self, **kwargs):
"""
Returns (token id, token data) for supplied credentials
"""
auth_data = kwargs.pop('auth_data', False)
if not (kwargs.get('user_domain_id') or
kwargs.get('user_domain_name')):
kwargs['user_domain_name'] = 'Default'
if not (kwargs.get('project_domain_id') or
kwargs.get('project_domain_name')):
kwargs['project_domain_name'] = 'Default'
body = self.auth(**kwargs)
token = body.response.get('x-subject-token')
if auth_data:
return token, body['token']
else:
return token
|
apache-2.0
|
clidoc/clidoc
|
lib/gtest-1.7.0/test/gtest_test_utils.py
|
1100
|
10812
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
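# For illustration (hypothetical invocation): running a test script as
#   ./foo_test.py --build_dir=/tmp/out
# strips --build_dir from sys.argv before unittest sees it and makes
# GetBuildDir() return the absolute form of /tmp/out.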
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal                 Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
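# Usage sketch (hypothetical binary name, illustrative only):
#
#   p = Subprocess([GetTestExecutablePath('sample_test')])
#   if p.exited and p.exit_code == 0:
#     print 'test passed, output:\n%s' % p.output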
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
|
mit
|
Nicop06/ansible
|
lib/ansible/modules/windows/win_power_plan.py
|
42
|
1656
|
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: win_power_plan
short_description: Changes the power plan of a Windows system
description:
- This module will change the power plan of a Windows system to the defined string.
- Windows defaults to C(balanced) which will cause CPU throttling. In some cases it can be preferable
to change the mode to C(high performance) to increase CPU performance.
version_added: "2.4"
author:
- Noah Sparks (@nwsparks)
options:
name:
description:
- String value that indicates the desired power plan. The power plan must already be
present on the system. Commonly there will be options for C(balanced) and C(high performance).
required: True
requirements:
- Windows Server 2008R2 (6.1)/Windows 7 or higher
'''
EXAMPLES = '''
- name: change power plan to high performance
win_power_plan:
name: high performance
'''
RETURN = r'''
power_plan_name:
description: Value of the intended power plan
returned: always
type: string
sample: balanced
power_plan_enabled:
description: State of the intended power plan
returned: success
type: boolean
sample: True
all_available_plans:
description: The name and enabled state of all power plans
returned: always
type: dictionary
sample: |
{
"High performance": false,
"Balanced": true,
"Power saver": false
}
'''
|
gpl-3.0
|
doganov/edx-platform
|
common/djangoapps/auth_exchange/views.py
|
116
|
3743
|
# pylint: disable=abstract-method
"""
Views to support exchange of authentication credentials.
The following are currently implemented:
1. AccessTokenExchangeView:
3rd party (social-auth) OAuth 2.0 access token -> 1st party (open-edx) OAuth 2.0 access token
2. LoginWithAccessTokenView:
1st party (open-edx) OAuth 2.0 access token -> session cookie
"""
from django.conf import settings
from django.contrib.auth import login
import django.contrib.auth as auth
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from provider import constants
from provider.oauth2.views import AccessTokenView as AccessTokenView
from rest_framework import permissions
from rest_framework.views import APIView
import social.apps.django_app.utils as social_utils
from auth_exchange.forms import AccessTokenExchangeForm
from openedx.core.lib.api.authentication import OAuth2AuthenticationAllowInactiveUser
class AccessTokenExchangeView(AccessTokenView):
"""
View for token exchange from 3rd party OAuth access token to 1st party OAuth access token
"""
@method_decorator(csrf_exempt)
@method_decorator(social_utils.strategy("social:complete"))
def dispatch(self, *args, **kwargs):
return super(AccessTokenExchangeView, self).dispatch(*args, **kwargs)
def get(self, request, _backend): # pylint: disable=arguments-differ
return super(AccessTokenExchangeView, self).get(request)
def post(self, request, _backend): # pylint: disable=arguments-differ
form = AccessTokenExchangeForm(request=request, data=request.POST)
if not form.is_valid():
return self.error_response(form.errors)
user = form.cleaned_data["user"]
scope = form.cleaned_data["scope"]
client = form.cleaned_data["client"]
if constants.SINGLE_ACCESS_TOKEN:
edx_access_token = self.get_access_token(request, user, scope, client)
else:
edx_access_token = self.create_access_token(request, user, scope, client)
return self.access_token_response(edx_access_token)
class LoginWithAccessTokenView(APIView):
"""
View for exchanging an access token for session cookies
"""
authentication_classes = (OAuth2AuthenticationAllowInactiveUser,)
permission_classes = (permissions.IsAuthenticated,)
@staticmethod
def _get_path_of_arbitrary_backend_for_user(user):
"""
Return the path to the first found authentication backend that recognizes the given user.
"""
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = auth.load_backend(backend_path)
if backend.get_user(user.id):
return backend_path
@method_decorator(csrf_exempt)
def post(self, request):
"""
Handler for the POST method to this view.
"""
# The django login method stores the user's id in request.session[SESSION_KEY] and the
# path to the user's authentication backend in request.session[BACKEND_SESSION_KEY].
# The login method assumes the backend path had been previously stored in request.user.backend
# in the 'authenticate' call. However, not all authentication providers do so.
# So we explicitly populate the request.user.backend field here.
if not hasattr(request.user, 'backend'):
request.user.backend = self._get_path_of_arbitrary_backend_for_user(request.user)
login(request, request.user) # login generates and stores the user's cookies in the session
return HttpResponse(status=204) # cookies stored in the session are returned with the response
|
agpl-3.0
|
1900/scrapy
|
scrapy/contrib/memdebug.py
|
7
|
1346
|
"""
MemoryDebugger extension
See documentation in docs/topics/extensions.rst
"""
import gc
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.utils.trackref import live_refs
class MemoryDebugger(object):
def __init__(self, stats):
try:
import libxml2
self.libxml2 = libxml2
except ImportError:
self.libxml2 = None
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
if not crawler.settings.getbool('MEMDEBUG_ENABLED'):
raise NotConfigured
o = cls(crawler.stats)
crawler.signals.connect(o.engine_started, signals.engine_started)
crawler.signals.connect(o.engine_stopped, signals.engine_stopped)
return o
def engine_started(self):
if self.libxml2:
self.libxml2.debugMemory(1)
def engine_stopped(self):
if self.libxml2:
self.libxml2.cleanupParser()
self.stats.set_value('memdebug/libxml2_leaked_bytes', self.libxml2.debugMemory(1))
gc.collect()
self.stats.set_value('memdebug/gc_garbage_count', len(gc.garbage))
for cls, wdict in live_refs.iteritems():
if not wdict:
continue
self.stats.set_value('memdebug/live_refs/%s' % cls.__name__, len(wdict))
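# To enable this extension, set MEMDEBUG_ENABLED = True in the project
# settings; the counters set above then appear in the crawl stats (e.g.
# memdebug/gc_garbage_count and, when libxml2 is importable,
# memdebug/libxml2_leaked_bytes).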
|
bsd-3-clause
|
mrquim/mrquimrepo
|
repo/script.module.schism.common/lib/bs4/builder/_htmlparser.py
|
41
|
9205
|
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import HTMLParser
try:
from HTMLParser import HTMLParseError
except ImportError, e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
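    # For illustration (not in the original source): '&#x20AC;' reaches
    # handle_charref as name='x20AC' and '&#8364;' as name='8364'; both
    # resolve to real_name == 8364 and emit the Euro sign.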
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
picklable = True
NAME = HTMLPARSER
features = [NAME, HTML, STRICT]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
kwargs['strict'] = False
if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
kwargs['convert_charrefs'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True,
exclude_encodings=exclude_encodings)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
|
gpl-2.0
|
kenwang815/KodiPlugins
|
script.module.youtube.dl/lib/youtube_dl/extractor/condenast.py
|
11
|
6255
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
orderedSet,
remove_end,
)
class CondeNastIE(InfoExtractor):
"""
    Condé Nast is a media group; some of its sites use a custom HTML5 player
    that works the same in all of them.
"""
# The keys are the supported sites and the values are the name to be shown
# to the user and in the extractor description.
_SITES = {
'allure': 'Allure',
'architecturaldigest': 'Architectural Digest',
'arstechnica': 'Ars Technica',
'bonappetit': 'Bon Appétit',
'brides': 'Brides',
'cnevids': 'Condé Nast',
'cntraveler': 'Condé Nast Traveler',
'details': 'Details',
'epicurious': 'Epicurious',
'glamour': 'Glamour',
'golfdigest': 'Golf Digest',
'gq': 'GQ',
'newyorker': 'The New Yorker',
'self': 'SELF',
'teenvogue': 'Teen Vogue',
'vanityfair': 'Vanity Fair',
'vogue': 'Vogue',
'wired': 'WIRED',
'wmagazine': 'W Magazine',
}
_VALID_URL = r'https?://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed(?:js)?)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))
EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed(?:js)?)/.+?' % '|'.join(_SITES.keys())
_TESTS = [{
'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
'md5': '1921f713ed48aabd715691f774c451f7',
'info_dict': {
'id': '5171b343c2b4c00dd0c1ccb3',
'ext': 'mp4',
'title': '3D Printed Speakers Lit With LED',
'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
}
}, {
# JS embed
'url': 'http://player.cnevids.com/embedjs/55f9cf8b61646d1acf00000c/5511d76261646d5566020000.js',
'md5': 'f1a6f9cafb7083bab74a710f65d08999',
'info_dict': {
'id': '55f9cf8b61646d1acf00000c',
'ext': 'mp4',
'title': '3D printed TSA Travel Sentry keys really do open TSA locks',
}
}]
def _extract_series(self, url, webpage):
title = self._html_search_regex(r'<div class="cne-series-info">.*?<h1>(.+?)</h1>',
webpage, 'series title', flags=re.DOTALL)
url_object = compat_urllib_parse_urlparse(url)
base_url = '%s://%s' % (url_object.scheme, url_object.netloc)
m_paths = re.finditer(r'<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]',
webpage, flags=re.DOTALL)
paths = orderedSet(m.group(1) for m in m_paths)
build_url = lambda path: compat_urlparse.urljoin(base_url, path)
entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
return self.playlist_result(entries, playlist_title=title)
def _extract_video(self, webpage, url_type):
if url_type != 'embed':
description = self._html_search_regex(
[
r'<div class="cne-video-description">(.+?)</div>',
r'<div class="video-post-content">(.+?)</div>',
],
webpage, 'description', fatal=False, flags=re.DOTALL)
else:
description = None
params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
'player params', flags=re.DOTALL)
video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
data = compat_urllib_parse_urlencode({'videoId': video_id,
'playerId': player_id,
'target': target,
})
base_info_url = self._search_regex(r'url = [\'"](.+?)[\'"][,;]',
webpage, 'base info url',
default='http://player.cnevids.com/player/loader.js?')
info_url = base_info_url + data
info_page = self._download_webpage(info_url, video_id,
'Downloading video info')
video_info = self._search_regex(r'var\s+video\s*=\s*({.+?});', info_page, 'video info')
video_info = self._parse_json(video_info, video_id)
formats = [{
'format_id': '%s-%s' % (fdata['type'].split('/')[-1], fdata['quality']),
'url': fdata['src'],
'ext': fdata['type'].split('/')[-1],
'quality': 1 if fdata['quality'] == 'high' else 0,
} for fdata in video_info['sources'][0]]
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': video_info['title'],
'thumbnail': video_info['poster_frame'],
'description': description,
}
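    # Illustrative only: pages embedding the player typically carry a snippet
    # such as
    #   var params = {videoId: "...", playerId: "...", target: "..."};
    # which _extract_video picks apart with the regexes above before querying
    # the loader URL for the video info JSON.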
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
site = mobj.group('site')
url_type = mobj.group('type')
item_id = mobj.group('id')
# Convert JS embed to regular embed
if url_type == 'embedjs':
parsed_url = compat_urlparse.urlparse(url)
url = compat_urlparse.urlunparse(parsed_url._replace(
path=remove_end(parsed_url.path, '.js').replace('/embedjs/', '/embed/')))
url_type = 'embed'
self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site])
webpage = self._download_webpage(url, item_id)
if url_type == 'series':
return self._extract_series(url, webpage)
else:
return self._extract_video(webpage, url_type)
|
gpl-2.0
|
DBuildService/dbs-server
|
dbs/task_api.py
|
1
|
4794
|
from threading import Thread
from celery import Celery
from . import tasks
from .celery import app
__all__ = ('TaskApi', )
def watch_task(task, callback, kwargs=None):
"""
watch task until it ends and then execute callback:
callback(response, **kwargs)
where response is a result of task
:param task: task to watch
:param callback: function which is called when task finishes
:param kwargs: dict which is passed to callback
:return: None
"""
response = task.wait()
if kwargs:
callback(response, **kwargs)
else:
callback(response)
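# Usage sketch (illustrative): watch_task blocks on task.wait(), so callers
# run it on a separate thread, e.g.
#
#   t = Thread(target=watch_task, args=(task_info, desktop_callback))
#   t.start()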
class TaskApi(object):
""" universal API for tasks which are executed on celery workers """
def build_docker_image(self, build_image, git_url, local_tag, git_dockerfile_path=None, git_commit=None,
parent_registry=None, target_registries=None, tag=None, repos=None,
callback=None, kwargs=None):
"""
build docker image from supplied git repo
TODO:
        Dockerfile has to be in the root of the git repo; path and commit are not implemented yet
yum repos
:param build_image: name of the build image (supplied docker image is built inside this image)
:param git_url: url to git repo
:param local_tag: image is known within the service with this tag
:param git_dockerfile_path: path to dockerfile within git repo (default is ./Dockerfile)
:param git_commit: which commit to checkout (master by default)
:param parent_registry: pull base image from this registry
:param target_registries: list of urls where built image will be pushed
:param tag: tag image with this tag (and push it to target_repo if specified)
:param repos: list of yum repos to enable in image
        :param callback: function to call when the task finishes; it has to accept
                         at least one argument: the return value of the task
        :param kwargs: dict which is passed to the callback; the callback is called
                       like this: callback(task_response, **kwargs)
:return: task_id
"""
args = (build_image, git_url, local_tag)
task_kwargs = {'parent_registry': parent_registry,
'target_registries': target_registries,
'tag': tag,
'git_commit': git_commit,
'git_dockerfile_path': git_dockerfile_path,
'repos': repos}
task_info = tasks.build_image.apply_async(args=args, kwargs=task_kwargs,
link=tasks.submit_results.s())
task_id = task_info.task_id
if callback:
t = Thread(target=watch_task, args=(task_info, callback, kwargs))
            # t.daemon = True
t.start()
return task_id
def find_dockerfiles_in_git(self):
        raise NotImplementedError()
def push_docker_image(self, image_id, source_registry, target_registry, tags, callback=None, kwargs=None):
"""
pull docker image from source registry, tag it with multiple tags and push it to target registry
:param image_id: image to pull
:param source_registry: registry to pull from
:param target_registry: registry for pushing
:param tags: list of tags for image tagging
        :param callback: function to call when the task finishes; it has to accept
                         at least one argument: the return value of the task
        :param kwargs: dict which is passed to the callback; the callback is called
                       like this: callback(task_response, **kwargs)
:return: task_id
"""
task_info = tasks.push_image.delay(image_id, source_registry, target_registry, tags)
task_id = task_info.task_id
if callback:
t = Thread(target=watch_task, args=(task_info, callback, kwargs))
            # t.daemon = True
t.start()
return task_id
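
    # Example (hypothetical image id and registries): push an image to a
    # private registry under two tags, without a completion callback:
    #
    #   api = TaskApi()
    #   api.push_docker_image('fedora-celery-build',
    #                         source_registry='registry.local:5000',
    #                         target_registry='registry.example.com:5000',
    #                         tags=['latest', 'v1'])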
def desktop_callback(data):
""" show desktop notification when build finishes """
from pprint import pprint
pprint(data)
try:
from gi.repository import Notify
except ImportError:
pass
else:
Notify.init("Docker Build Service")
n = Notify.Notification.new(
"Docker Build Service",
"Docker Build of '%s' has finished." % data[0]['Id'],
"dialog-information"
)
n.show()
if __name__ == '__main__':
t = TaskApi()
t.build_docker_image(build_image="buildroot-fedora",
git_url="github.com/TomasTomecek/docker-hello-world.git",
local_tag="fedora-celery-build",
callback=desktop_callback)
|
bsd-3-clause
|
Kongsea/tensorflow
|
tensorflow/examples/learn/hdf5_classification.py
|
75
|
2899
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, hdf5 format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
import h5py # pylint: disable=g-bad-import-order
X_FEATURE = 'x' # Name of the input feature.
def main(unused_argv):
# Load dataset.
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
  # Note that we save and load the iris data in HDF5 format here purely as a
  # simple demonstration.
h5f = h5py.File('/tmp/test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=x_train)
h5f.create_dataset('X_test', data=x_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('/tmp/test_hdf5.h5', 'r')
x_train = np.array(h5f['X_train'])
x_test = np.array(h5f['X_test'])
y_train = np.array(h5f['y_train'])
y_test = np.array(h5f['y_test'])
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = [
tf.feature_column.numeric_column(
X_FEATURE, shape=np.array(x_train).shape[1:])]
classifier = tf.estimator.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=200)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class_ids'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
suyashphadtare/vestasi-erp-new-final
|
erpnext/setup/doctype/company/fixtures/india/__init__.py
|
36
|
4647
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def install(company):
docs = [
{'doctype': 'Deduction Type', 'name': 'Professional Tax', 'description': 'Professional Tax', 'deduction_name': 'Professional Tax'},
{'doctype': 'Deduction Type', 'name': 'Provident Fund', 'description': 'Provident fund', 'deduction_name': 'Provident Fund'},
{'doctype': 'Earning Type', 'name': 'House Rent Allowance', 'description': 'House Rent Allowance', 'earning_name': 'House Rent Allowance', 'taxable': 'No'},
]
for d in docs:
try:
frappe.get_doc(d).insert()
except frappe.NameError:
pass
# accounts
fld_dict = {
'account_name': 0,
'parent_account': 1,
'group_or_ledger': 2,
'account_type': 3,
'report_type': 4,
'tax_rate': 5,
'root_type': 6
}
acc_list_india = [
        [_('CENVAT Capital Goods'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('CENVAT'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('CENVAT Service Tax'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('CENVAT Service Tax Cess 1'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('CENVAT Service Tax Cess 2'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('CENVAT Edu Cess'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('CENVAT SHE Cess'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('Excise Duty 4'),_('Tax Assets'),'Ledger','Tax','Balance Sheet','4.00', 'Asset'],
[_('Excise Duty 8'),_('Tax Assets'),'Ledger','Tax','Balance Sheet','8.00', 'Asset'],
[_('Excise Duty 10'),_('Tax Assets'),'Ledger','Tax','Balance Sheet','10.00', 'Asset'],
[_('Excise Duty 14'),_('Tax Assets'),'Ledger','Tax','Balance Sheet','14.00', 'Asset'],
[_('Excise Duty Edu Cess 2'),_('Tax Assets'),'Ledger','Tax','Balance Sheet','2.00', 'Asset'],
[_('Excise Duty SHE Cess 1'),_('Tax Assets'),'Ledger','Tax','Balance Sheet','1.00', 'Asset'],
[_('P L A'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('P L A - Cess Portion'),_('Tax Assets'),'Ledger','Chargeable','Balance Sheet', None, 'Asset'],
[_('Edu. Cess on Excise'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','2.00', 'Liability'],
[_('Edu. Cess on Service Tax'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','2.00', 'Liability'],
[_('Edu. Cess on TDS'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','2.00', 'Liability'],
[_('Excise Duty @ 4'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','4.00', 'Liability'],
[_('Excise Duty @ 8'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','8.00', 'Liability'],
[_('Excise Duty @ 10'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','10.00', 'Liability'],
[_('Excise Duty @ 14'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','14.00', 'Liability'],
[_('Service Tax'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','10.3', 'Liability'],
[_('SHE Cess on Excise'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','1.00', 'Liability'],
[_('SHE Cess on Service Tax'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','1.00', 'Liability'],
[_('SHE Cess on TDS'),_('Duties and Taxes'),'Ledger','Tax','Balance Sheet','1.00', 'Liability'],
[_('Professional Tax'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability'],
[_('VAT'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability'],
[_('TDS (Advertisement)'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability'],
[_('TDS (Commission)'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability'],
[_('TDS (Contractor)'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability'],
[_('TDS (Interest)'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability'],
[_('TDS (Rent)'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability'],
[_('TDS (Salary)'),_('Duties and Taxes'),'Ledger','Chargeable','Balance Sheet', None, 'Liability']
]
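    # Illustrative mapping: each row above is applied positionally via fld_dict,
    # so the _('VAT') row becomes account_name='VAT',
    # parent_account='Duties and Taxes - <company abbr>', group_or_ledger='Ledger',
    # account_type='Chargeable', report_type='Balance Sheet', tax_rate=None,
    # root_type='Liability'.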
for lst in acc_list_india:
account = frappe.get_doc({
"doctype": "Account",
"freeze_account": "No",
"master_type": "",
"company": company.name
})
        for d in fld_dict.keys():
            value = lst[fld_dict[d]]
            # parent accounts are stored with the company abbreviation appended
            if d == 'parent_account' and value:
                value = value + ' - ' + company.abbr
            account.set(d, value)
account.insert()
|
agpl-3.0
|
anistark/demo
|
drsftask/drsftask/settings.py
|
1
|
2083
|
"""
Django settings for drsftask project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd+pgg=wsceh$9qsa8%$a^%!*7cq40ij*x=a(19_$%6wvrs0m1#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'todo',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'drsftask.urls'
WSGI_APPLICATION = 'drsftask.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
mit
|
open-dynaMIX/experms
|
src/experms/configfile/check_ownerandgroup.py
|
1
|
2097
|
# -*- coding: utf-8 -*-
import sys
import pwd
import grp
def check_ownerandgroup(parser, section, oorg, debug):
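    """Resolve the 'owner'/'group' option of a config section to a uid/gid.

    Returns None if the option is missing or empty, False if the given
    user/group does not exist, and the numeric uid/gid otherwise.
    """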
if oorg == 'owner':
switch = "User"
else:
switch = "Group"
if not parser.has_option(section, oorg):
if debug:
print >> sys.stderr, ("[debug] '%s' in section '%s' is not set"
% (oorg, section))
return None
tempowner = parser.get(section, oorg)
if tempowner in ['', 'None', 'none']:
if debug:
print >> sys.stderr, ("[debug] '%s' in section '%s' is not "
"set" % (oorg, section))
return None
try:
tempowner = int(tempowner)
except ValueError:
try:
if oorg == 'owner':
pwd.getpwnam(tempowner)
else:
grp.getgrnam(tempowner)
except KeyError:
owner = False
print >> sys.stderr, ("Error in section '%s': %s '%s' "
"doesn't exist" % (section, switch,
tempowner))
else:
# save the user/group as uid
if oorg == 'owner':
owner = pwd.getpwnam(tempowner).pw_uid
else:
owner = grp.getgrnam(tempowner).gr_gid
if debug:
print >> sys.stderr, ("[debug] '%s' in section '%s' "
"is valid" % (oorg, section))
else:
try:
if oorg == 'owner':
pwd.getpwuid(tempowner)
else:
grp.getgrgid(tempowner)
except KeyError:
print >> sys.stderr, ("Error in section '%s': %s '%s' "
"doesn't exist" % (section, switch,
tempowner))
owner = False
else:
owner = tempowner
if debug:
print >> sys.stderr, ("[debug] '%s' in section '%s' "
"is valid" % (oorg, section))
return owner
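
# Example (hypothetical config path and section): resolving the 'owner'
# option of section 'section1' to a numeric uid with a stdlib ConfigParser:
#
#   import ConfigParser
#   parser = ConfigParser.ConfigParser()
#   parser.read('/etc/experms.conf')
#   uid = check_ownerandgroup(parser, 'section1', 'owner', debug=False)
#   # uid is None (option unset), False (no such user) or e.g. 1000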
|
gpl-3.0
|
indashnet/InDashNet.Open.UN2000
|
android/external/llvm/utils/wciia.py
|
16
|
2943
|
#!/usr/bin/env python
"""
wciia - Whose Code Is It Anyway
Determines code owner of the file/folder relative to the llvm source root.
Code owner is determined from the content of the CODE_OWNERS.TXT
by parsing the D: field
usage:
utils/wciia.py path
limitations:
- must be run from llvm source root
- very simplistic algorithm
- only handles * as a wildcard
- not very user friendly
- does not handle the proposed F: field
"""
import os
code_owners = {}
def process_files_and_folders(owner):
filesfolders = owner['filesfolders']
# paths must be in ( ... ) so strip them
lpar = filesfolders.find('(')
rpar = filesfolders.rfind(')')
if rpar <= lpar:
# give up
return
paths = filesfolders[lpar+1:rpar]
# split paths
owner['paths'] = []
for path in paths.split():
owner['paths'].append(path)
def process_code_owner(owner):
if 'filesfolders' in owner:
filesfolders = owner['filesfolders']
else:
# print "F: field missing, using D: field"
owner['filesfolders'] = owner['description']
process_files_and_folders(owner)
code_owners[owner['name']] = owner
# process CODE_OWNERS.TXT first
code_owners_file = open("CODE_OWNERS.TXT", "r").readlines()
code_owner = {}
for line in code_owners_file:
for word in line.split():
if word == "N:":
name = line[2:].strip()
if code_owner:
process_code_owner(code_owner)
code_owner = {}
# reset the values
code_owner['name'] = name
if word == "E:":
email = line[2:].strip()
code_owner['email'] = email
if word == "D:":
description = line[2:].strip()
code_owner['description'] = description
if word == "F:":
filesfolders = line[2:].strip()
code_owner['filesfolders'].append(filesfolders)
def find_owners(fpath):
onames = []
lmatch = -1
	# very simplistic way of finding the best match
for name in code_owners:
owner = code_owners[name]
if 'paths' in owner:
for path in owner['paths']:
# print "searching (" + path + ")"
# try exact match
if fpath == path:
					return [name]
# see if path ends with a *
rstar = path.rfind('*')
if rstar>0:
# try the longest match,
rpos = -1
if len(fpath) < len(path):
rpos = path.find(fpath)
if rpos == 0:
onames.append(name)
onames.append('Chris Lattner')
return onames
# now let's try to find the owner of the file or folder
import sys
if len(sys.argv) < 2:
print "usage " + sys.argv[0] + " file_or_folder"
exit(-1)
# the path we are checking
path = str(sys.argv[1])
# check if this is a real path
if not os.path.exists(path):
print "path (" + path + ") does not exist"
exit(-1)
owners_name = find_owners(path)
# be grammatically correct
print "The owner(s) of the (" + path + ") is(are) : " + str(owners_name)
exit(0)
# bottom up walk of the current .
# not yet used
root = "."
for dir,subdirList,fileList in os.walk( root , topdown=False ) :
print "dir :" , dir
for fname in fileList :
print "-" , fname
print
|
apache-2.0
|