repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null) |
---|---|---|---|---|
ClearCorp-dev/odoo | refs/heads/8.0 | addons/point_of_sale/__init__.py | 378 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_bank_statement
import controllers
import point_of_sale
import report
import res_users
import res_partner
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
runt18/nupic | refs/heads/master | examples/opf/experiments/anomaly/spatial/2fields_many_skewed/description.py | 160 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'),
('numericFieldNameB', 'sum'),
('categoryFieldNameC', 'first')],
'hours': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'NontemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step; 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21),
'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21),
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys are any desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
# [optional] A sequence of one or more tasks that describe what to do with the
# model. Each task consists of a task label, an input spec., iteration count,
# and a task-control spec per opfTaskSchema.json
#
# NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver.
# Clients that interact with OPFExperiment directly do not make use of
# the tasks specification.
#
control = dict(
environment='opfExperiment',
tasks = [
{
# Task label; this label string may be used for diagnostic logging and for
# constructing filenames or directory pathnames for task-specific files, etc.
'taskLabel' : "Anomaly",
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'test_NoProviders',
'version': 1,
'streams': [
{
'columns': ['*'],
'info': 'my simple dataset',
'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
# TODO: Aggregation is not supported yet by run_opf_experiment.py
#'aggregation' : config['aggregationInfo']
},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json)
'taskControl' : {
# Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX
# instances.
'iterationCycle' : [
#IterationPhaseSpecLearnOnly(1000),
IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None),
#IterationPhaseSpecInferOnly(10, inferenceArgs=None),
],
'metrics' : [
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regexes correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
# Callbacks for experimentation/research (optional)
'callbacks' : {
# Callbacks to be called at the beginning of a task, before model iterations.
# Signature: callback(<reference to OPFExperiment>); returns nothing
# 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb],
# 'setup' : [claModelControlDisableTPLearningCb],
'setup' : [],
# Callbacks to be called after every learning/inference iteration
# Signature: callback(<reference to OPFExperiment>); returns nothing
'postIter' : [],
# Callbacks to be called when the experiment task is finished
# Signature: callback(<reference to OPFExperiment>); returns nothing
'finish' : []
}
} # End of taskControl
}, # End of task
]
)
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
|
lemon-pi/linux-3.x | refs/heads/master | tools/perf/tests/attr.py | 3174 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
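# For example (behavior derived from the loop above): compare_data('0|1', '1')
# and compare_data('*', '5') both return True, while compare_data('0', '1')
# returns False.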
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
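#
# A minimal, hypothetical test file in this format (section names and
# values are illustrative only, not copied from the real perf test suite):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000
#   sample_type   = 263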
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
# The event record section header contains the 'event' word,
# optionally followed by ':', allowing the 'parent
# event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
# we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
StephenHamilton/gini | refs/heads/master | backend/src/gloader/xml/marshal/__init__.py | 10 | """Converting Python objects to XML and back again.
xml.marshal.generic
Marshals simple Python data types into a custom XML format. The
Marshaller and Unmarshaller classes can be subclassed in order to
implement marshalling into a different XML DTD.
xml.marshal.wddx
Marshals Python data types into the WDDX DTD.
"""
__all__ = ['generic', 'wddx']
|
jeromecc/doctoctocbot | refs/heads/master | src/status/tests.py | 35 | from django.test import TestCase
# Create your tests here.
|
pitch-sands/i-MPI | refs/heads/master | flask/Lib/site-packages/pip-1.5.6-py2.7.egg/pip/_vendor/requests/structures.py | 279 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import os
import collections
from itertools import islice
class IteratorProxy(object):
"""docstring for IteratorProxy"""
def __init__(self, i):
self.i = i
# self.i = chain.from_iterable(i)
def __iter__(self):
return self.i
def __len__(self):
if hasattr(self.i, '__len__'):
return len(self.i)
if hasattr(self.i, 'len'):
return self.i.len
if hasattr(self.i, 'fileno'):
return os.fstat(self.i.fileno()).st_size
def read(self, n):
return "".join(islice(self.i, None, n))
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive:
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
|
nightism/addressbook-level4 | refs/heads/master | pandoc/latexfigure.py | 4 | #!/usr/bin/env python3
from panflute import *
import re
import bs4
"""
Converts images represented in HTML blocks of
<figure>
<img src="...">
<figcaption>Figure caption</figcaption>
</figure>
into proper LaTeX figures.
"""
def latex_figure(elem, doc):
if type(elem) != RawBlock or elem.format != 'html':
return
root = bs4.BeautifulSoup(elem.text, 'html.parser')
if not root.figure:
return
ltx = [r'\begin{figure}[H]', r'\centering']
if root.figure.img:
src = root.figure.img['src']
ltx.append(r'\includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{'
+ src + '}')
if root.figure.figcaption:
caption = root.figure.figcaption.get_text().strip()
match = re.match(r'^Figure [\d.]+:\s+', caption)
if match:
caption = caption[match.end():]
ltx.append(r'\caption{' + caption + '}')
ltx.append(r'\end{figure}')
return RawBlock('\n'.join(ltx), format='latex')
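# Sketch of the transformation this filter performs on a hypothetical input
# (file name and caption invented for illustration):
#
#   <figure>
#     <img src="images/Ui.png">
#     <figcaption>Figure 2.1: UI structure</figcaption>
#   </figure>
#
# becomes, with the "Figure 2.1:" prefix stripped by the regex above:
#
#   \begin{figure}[H]
#   \centering
#   \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/Ui.png}
#   \caption{UI structure}
#   \end{figure}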
if __name__ == '__main__':
toJSONFilter(latex_figure)
|
FocusTheOne/Qomolangma | refs/heads/master | Qomolangma/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/actions/src/subdir2/make-file.py | 973 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
contents = "Hello from make-file.py\n"
open(sys.argv[1], 'wb').write(contents)
|
smmribeiro/intellij-community | refs/heads/master | python/testData/resolve/multiFile/resolveQualifiedSuperClassInPackage/foo/baz.py | 83 | class SuperDuper(object):
def copy(self): print "duper"
|
sammcveety/incubator-beam | refs/heads/master | sdks/python/apache_beam/examples/complete/autocomplete_test.py | 8 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the autocomplete example."""
import unittest
import apache_beam as beam
from apache_beam.examples.complete import autocomplete
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class AutocompleteTest(unittest.TestCase):
WORDS = ['this', 'this', 'that', 'to', 'to', 'to']
def test_top_prefixes(self):
with TestPipeline() as p:
words = p | beam.Create(self.WORDS)
result = words | autocomplete.TopPerPrefix(5)
# values must be hashable for now
result = result | beam.Map(lambda (k, vs): (k, tuple(vs)))
assert_that(result, equal_to(
[
('t', ((3, 'to'), (2, 'this'), (1, 'that'))),
('to', ((3, 'to'), )),
('th', ((2, 'this'), (1, 'that'))),
('thi', ((2, 'this'), )),
('this', ((2, 'this'), )),
('tha', ((1, 'that'), )),
('that', ((1, 'that'), )),
]))
if __name__ == '__main__':
unittest.main()
|
Tong-Chen/scikit-learn | refs/heads/master | sklearn/datasets/california_housing.py | 11 | """California housing dataset.
The original database is available from StatLib
http://lib.stat.cmu.edu/
The data contains 20,640 observations on 9 variables.
References
----------
Pace, R. Kelley and Ronald Barry, Sparse Spatial Autoregressions,
Statistics and Probability Letters, 33 (1997) 291-297.
"""
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
from zipfile import ZipFile
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.request import urlopen
import numpy as np
from .base import get_data_home, Bunch
from ..externals import joblib
DATA_URL = "http://lib.stat.cmu.edu/modules.php?op=modload&name=Downloads&"\
"file=index&req=getit&lid=83"
TARGET_FILENAME = "cal_housing.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_california_housing(data_home=None, download_if_missing=True):
"""Loader for the California housing dataset from StatLib.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Notes
------
This dataset consists of 20,640 samples and 9 features.
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Cal. housing from %s to %s' % (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
try:
cadata_fd = zip_file.open('cadata.txt', 'r')
cadata = BytesIO(cadata_fd.read())
# skip the first 27 lines (documentation)
cal_housing = np.loadtxt(cadata, skiprows=27)
joblib.dump(cal_housing, join(data_home, TARGET_FILENAME),
compress=6)
finally:
zip_file.close()
else:
cal_housing = joblib.load(join(data_home, TARGET_FILENAME))
feature_names = ["MedInc", "HouseAge", "AveRooms", "AveBedrms",
"Population", "AveOccup", "Latitude", "Longitude"]
target, data = cal_housing[:, 0], cal_housing[:, 1:]
# avg rooms = total rooms / households
data[:, 2] /= data[:, 5]
# avg bed rooms = total bed rooms / households
data[:, 3] /= data[:, 5]
# avg occupancy = population / households
data[:, 5] = data[:, 4] / data[:, 5]
# target in units of 100,000
target = target / 100000.0
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=MODULE_DOCS)
|
CodeDJ/qt5-hidpi | refs/heads/master | qt/qtwebkit/Tools/QueueStatusServer/model/queues_unittest.py | 123 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from model.queues import Queue
class QueueTest(unittest.TestCase):
def test_is_ews(self):
mac_ews = Queue("mac-ews")
self.assertTrue(mac_ews.is_ews())
def test_queue_with_name(self):
self.assertEqual(Queue.queue_with_name("bogus"), None)
self.assertEqual(Queue.queue_with_name("mac-ews").name(), "mac-ews")
self.assertRaises(AssertionError, Queue, ("bogus"))
def _assert_short_name(self, queue_name, short_name):
self.assertEqual(Queue(queue_name).short_name(), short_name)
def test_short_name(self):
self._assert_short_name("mac-ews", "Mac")
self._assert_short_name("commit-queue", "Commit")
self._assert_short_name("style-queue", "Style")
def _assert_display_name(self, queue_name, short_name):
self.assertEqual(Queue(queue_name).display_name(), short_name)
def test_display_name(self):
self._assert_display_name("mac-ews", "Mac EWS")
self._assert_display_name("commit-queue", "Commit Queue")
self._assert_display_name("style-queue", "Style Queue")
def _assert_name_with_underscores(self, queue_name, short_name):
self.assertEqual(Queue(queue_name).name_with_underscores(), short_name)
def test_name_with_underscores(self):
self._assert_name_with_underscores("mac-ews", "mac_ews")
self._assert_name_with_underscores("commit-queue", "commit_queue")
def test_style_queue_is_ews(self):
# For now we treat the style-queue as an EWS since most users would
# describe it as such. If is_ews() ever needs to mean "builds the patch"
# or similar, then we will need to adjust all callers.
self.assertTrue(Queue("style-queue").is_ews())
self.assertTrue("style-queue" in map(Queue.name, Queue.all_ews()))
if __name__ == '__main__':
unittest.main()
|
kikong/electron | refs/heads/master | tools/coffee2c.py | 5 | #!/usr/bin/env python
import os
import subprocess
import sys
SOURCE_ROOT = os.path.dirname(os.path.dirname(__file__))
def main():
natives = sys.argv[1]
coffee_source_files = sys.argv[2:]
output_dir = os.path.dirname(natives)
js_source_files = compile_coffee(coffee_source_files, output_dir)
call_js2c(natives, js_source_files)
def compile_coffee(coffee_source_files, output_dir):
js_source_files = []
for source_file in coffee_source_files:
output_filename = os.path.splitext(source_file)[0] + '.js'
output_path = os.path.join(output_dir, output_filename)
js_source_files.append(output_path)
call_compile_coffee(source_file, output_path)
return js_source_files
def call_compile_coffee(source_file, output_filename):
compile_coffee = os.path.join(SOURCE_ROOT, 'tools', 'compile-coffee.py')
subprocess.check_call([sys.executable, compile_coffee, source_file,
output_filename])
def call_js2c(natives, js_source_files):
js2c = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'tools', 'js2c.py')
subprocess.check_call([sys.executable, js2c, natives] + js_source_files)
if __name__ == '__main__':
sys.exit(main())
|
santileortiz/Order-type-viewer | refs/heads/master | mkpy/pymk.py | 1 | # THIS IS NOT PYTHON CODE, it's BASH code installed into /usr/share/bash-completion/completions/
# to enable tab completions.
_pymk()
{
local cur prev words cword
_init_completion || return
res="$(./pymk.py --get_completions "$COMP_POINT $COMP_LINE")"
COMPREPLY=( $( compgen -W '$res' -- "$cur" ) )
[[ $COMPREPLY ]] || \
COMPREPLY=( $( compgen -f -- "$cur" ) )
} &&
complete -F _pymk "./pymk.py"
# ex: ts=4 sw=4 et filetype=sh
|
itsjeyd/edx-platform | refs/heads/master | common/djangoapps/util/memcache.py | 251 | """
This module provides a KEY_FUNCTION suitable for use with a memcache backend
so that we can cache any keys, not just ones that memcache would ordinarily accept
"""
from django.utils.encoding import smart_str
import hashlib
import urllib
def fasthash(string):
"""
Hashes `string` into a string representation of a 128-bit digest.
"""
md4 = hashlib.new("md4")
md4.update(string)
return md4.hexdigest()
def cleaned_string(val):
"""
Converts `val` to unicode and URL-encodes special characters
(including quotes and spaces)
"""
return urllib.quote_plus(smart_str(val))
def safe_key(key, key_prefix, version):
"""
Given a `key`, `key_prefix`, and `version`,
return a key that is safe to use with memcache.
`key`, `key_prefix`, and `version` can be numbers, strings, or unicode.
"""
# Clean for whitespace and control characters, which
# cause memcache to raise an exception
key = cleaned_string(key)
key_prefix = cleaned_string(key_prefix)
version = cleaned_string(version)
# Attempt to combine the prefix, version, and key
combined = ":".join([key_prefix, version, key])
# If the total length is too long for memcache, hash it
if len(combined) > 250:
combined = fasthash(combined)
# Return the result
return combined
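# Illustrative behavior, derived from the code above (values hypothetical):
#
#   safe_key('my key', 'prefix', 1)  ->  'prefix:1:my+key'
#
# while a combined "prefix:version:key" string longer than 250 characters is
# replaced wholesale by its 32-character MD4 hex digest from fasthash().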
|
miludmann/trainstalker | refs/heads/master | trainstalker/sncfdata_rt/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
bfagundez/apex_paperboy | refs/heads/master | test/tooling_tests.py | 1 | #TO RUN: joey2 project_create_tests.py
# OR TO RUN SPECIFIC METHODS:
# joey2 -m unittest project_tests.TestProjectCreate.test_create_project_via_package_xml_file
# joey2 -m unittest project_tests.TestProjectCreate.test_create_project_via_package_dict
import sys
import os
import unittest
import shutil
import requests
sys.path.append('../')
import lib.config as config
import lib.mm_util as util
import test_helper as helper
from lib.mm_client import MavensMateClient
class TestToolingAPI(unittest.TestCase):
# FYI: overriding this constructor is apparently not recommended, so we should find a better way to init test data
def __init__(self, *args, **kwargs):
super(TestToolingAPI, self).__init__(*args, **kwargs)
self.username = '[email protected]'
self.password = 'force'
self.org_type = 'developer'
self.client = MavensMateClient(credentials={
"username" : self.username,
"password" : self.password,
"org_type" : self.org_type
})
def setUp(self):
pass
def test_overlay_actions(self):
payload = {
"ActionScriptType" : "None",
"ExecutableEntityId" : "01pd0000001yXtYAAU",
"IsDumpingHeap" : True,
"Iteration" : 1,
"Line" : 3,
"ScopeId" : "005d0000000xxzsAAA"
}
list_result = self.client.get_overlay_actions(id='01pd0000001yXtYAAU')
print list_result
create_result = self.client.create_overlay_action(payload)
print create_result
list_result = self.client.get_overlay_actions(id='01pd0000001yXtYAAU')
print list_result
# delete_result = self.client.remove_overlay_action(id='01pd0000001yXtYAAU', line_number=3)
# print delete_result
# list_result = self.client.get_overlay_actions(id='01pd0000001yXtYAAU')
# print list_result
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main() |
lamdnhan/osf.io | refs/heads/master | website/addons/base/exceptions.py | 23 | """
Custom exceptions for add-ons.
"""
class AddonError(Exception):
pass
class HookError(AddonError):
pass
class AddonEnrichmentError(AddonError):
@property
def can_delete(self):
return False
@property
def can_download(self):
return False
@property
def renderable_error(self):
'''A hook to be implemented by subclasses, returning
an HTML error to be displayed to the user.
Later concatenated with additional style tags
'''
return '''
<div class="alert alert-info" role="alert">
This file is currently unable to be rendered. <br>
If this should not have occurred and the issue persists,
please report it to <a href="mailto:[email protected]">[email protected]</a>
</div>
'''
def as_html(self):
# TODO Refactor me to be all in the front end
# 2/10/14 ping @chrisseto when refactoring
additional = ''
if not self.can_download:
additional += "<style>.file-download{display: none;}</style>"
if not self.can_delete:
additional += "<style>.file-delete{display: none;}</style>"
return self.renderable_error + additional
class FileDeletedError(AddonEnrichmentError):
@property
def renderable_error(self):
return '''
<div class="alert alert-info" role="alert">
This file has been deleted.
</div>
'''
class FileDoesntExistError(AddonEnrichmentError):
@property
def renderable_error(self):
return '''
<div class="alert alert-info" role="alert">
This file does not exist.
</div>
'''
|
MihaiMoldovanu/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_keystone_endpoint.py | 25 | #!/usr/bin/python
# Copyright: (c) 2017, VEXXHOST, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_endpoint
short_description: Manage OpenStack Identity service endpoints
extends_documentation_fragment: openstack
author:
- Mohammed Naser (@mnaser)
- Alberto Murillo (@albertomurillo)
version_added: "2.5"
description:
- Create, update, or delete OpenStack Identity service endpoints. If a
service with the same combination of I(service), I(interface) and I(region)
exists, the I(url) and I(state) (C(present) or C(absent)) will be updated.
options:
service:
description:
- Name or id of the service.
required: true
interface:
description:
- Interface of the service.
choices: [admin, public, internal]
required: true
url:
description:
- URL of the service.
required: true
region:
description:
- Region that the service belongs to. Note that I(region_name) is used for authentication.
enabled:
description:
- Is the service enabled.
default: True
state:
description:
- Should the resource be C(present) or C(absent).
choices: [present, absent]
default: present
requirements:
- shade >= 1.11.0
'''
EXAMPLES = '''
- name: Create a service for glance
os_keystone_endpoint:
cloud: mycloud
service: glance
interface: public
url: http://controller:9292
region: RegionOne
state: present
- name: Delete a service for nova
os_keystone_endpoint:
cloud: mycloud
service: nova
interface: public
region: RegionOne
state: absent
'''
RETURN = '''
endpoint:
description: Dictionary describing the endpoint.
returned: On success when I(state) is C(present)
type: complex
contains:
id:
description: Endpoint ID.
type: string
sample: 3292f020780b4d5baf27ff7e1d224c44
region:
description: Region Name.
type: string
sample: RegionOne
service_id:
description: Service ID.
type: string
sample: b91f1318f735494a825a55388ee118f3
interface:
description: Endpoint Interface.
type: string
sample: public
url:
description: Service URL.
type: string
sample: http://controller:9292
enabled:
description: Service status.
type: boolean
sample: True
'''
from distutils.version import StrictVersion
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
def _needs_update(module, endpoint):
if endpoint.enabled != module.params['enabled']:
return True
if endpoint.url != module.params['url']:
return True
return False
def _system_state_change(module, endpoint):
state = module.params['state']
if state == 'absent' and endpoint:
return True
if state == 'present':
if endpoint is None:
return True
return _needs_update(module, endpoint)
return False
def main():
argument_spec = openstack_full_argument_spec(
service=dict(type='str', required=True),
interface=dict(type='str', required=True, choices=['admin', 'public', 'internal']),
url=dict(type='str', required=True),
region=dict(type='str'),
enabled=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if StrictVersion(shade.__version__) < StrictVersion('1.11.0'):
module.fail_json(msg="To utilize this module, the installed version of"
"the shade library MUST be >=1.11.0")
service_name_or_id = module.params['service']
interface = module.params['interface']
url = module.params['url']
region = module.params['region']
enabled = module.params['enabled']
state = module.params['state']
try:
cloud = shade.operator_cloud(**module.params)
service = cloud.get_service(service_name_or_id)
if service is None:
module.fail_json(msg='Service %s does not exist' % service_name_or_id)
filters = dict(service_id=service.id, interface=interface)
if region is not None:
filters['region'] = region
endpoints = cloud.search_endpoints(filters=filters)
if len(endpoints) > 1:
module.fail_json(msg='Service %s, interface %s and region %s are '
'not unique' %
(service_name_or_id, interface, region))
elif len(endpoints) == 1:
endpoint = endpoints[0]
else:
endpoint = None
if module.check_mode:
module.exit_json(changed=_system_state_change(module, endpoint))
if state == 'present':
if endpoint is None:
result = cloud.create_endpoint(service_name_or_id=service,
url=url, interface=interface,
region=region, enabled=enabled)
endpoint = result[0]
changed = True
else:
if _needs_update(module, endpoint):
endpoint = cloud.update_endpoint(
endpoint.id, url=url, enabled=enabled)
changed = True
else:
changed = False
module.exit_json(changed=changed, endpoint=endpoint)
elif state == 'absent':
if endpoint is None:
changed = False
else:
cloud.delete_endpoint(endpoint.id)
changed = True
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
quinot/ansible | refs/heads/devel | lib/ansible/plugins/connection/lxd.py | 44 | # (c) 2016 Matt Clay <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Matt Clay <[email protected]>
connection: lxd
short_description: Run tasks in lxc containers via lxc CLI
description:
- Run commands or put/fetch files to an existing lxc container using lxc CLI
version_added: "2.0"
options:
remote_addr:
description:
- Container identifier
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_lxd_host
executable:
description:
- shell to use for execution inside container
default: /bin/sh
vars:
- name: ansible_executable
- name: ansible_lxd_executable
"""
import os
from distutils.spawn import find_executable
from subprocess import call, Popen, PIPE
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
""" lxd based connections """
transport = "lxd"
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._host = self._play_context.remote_addr
self._lxc_cmd = find_executable("lxc")
if not self._lxc_cmd:
raise AnsibleError("lxc command not found in PATH")
if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
self._display.warning('lxd does not support remote_user, using container default: root')
def _connect(self):
"""connect to lxd (nothing to do here) """
super(Connection, self)._connect()
if not self._connected:
self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=True):
""" execute a command on the lxd host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)
local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')
process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate(in_data)
stdout = to_text(stdout)
stderr = to_text(stderr)
if stderr == "error: Container is not running.\n":
raise AnsibleConnectionFailure("container not running: %s" % self._host)
if stderr == "error: not found\n":
raise AnsibleConnectionFailure("container not found: %s" % self._host)
return process.returncode, stdout, stderr
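# For illustration, with remote_addr 'mycontainer' and the default /bin/sh
# executable, the command assembled above is roughly (container name is
# hypothetical):
#
#   lxc exec mycontainer -- /bin/sh -c "<cmd>"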
def put_file(self, in_path, out_path):
""" put a file from local to lxd """
super(Connection, self).put_file(in_path, out_path)
self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)
if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("input path is not a file: %s" % in_path)
local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
call(local_cmd)
def fetch_file(self, in_path, out_path):
""" fetch a file from lxd to local """
super(Connection, self).fetch_file(in_path, out_path)
self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)
local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
call(local_cmd)
def close(self):
""" close the connection (nothing to do here) """
super(Connection, self).close()
self._connected = False
|
JulienMcJay/eclock | refs/heads/master | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/test/test_pywintypes.py | 15 | import sys
import unittest
import pywintypes
import time
from pywin32_testutil import str2bytes, ob2memory
import datetime
import operator
class TestCase(unittest.TestCase):
def testPyTimeFormat(self):
struct_current = time.localtime()
pytime_current = pywintypes.Time(struct_current)
# try and test all the standard parts of the format
# Note we used to include '%Z' testing, but that was pretty useless as
# it always returned the local timezone.
format_strings = "%a %A %b %B %c %d %H %I %j %m %M %p %S %U %w %W %x %X %y %Y"
for fmt in format_strings.split():
v1 = pytime_current.Format(fmt)
v2 = time.strftime(fmt, struct_current)
self.assertEquals(v1, v2, "format %s failed - %r != %r" % (fmt, v1, v2))
def testPyTimePrint(self):
# This used to crash with an invalid, or too early time.
# We don't really want to check that it does cause a ValueError
# (as hopefully this wont be true forever). So either working, or
# ValueError is OK.
try:
t = pywintypes.Time(-2)
t.Format()
except ValueError:
return
def testTimeInDict(self):
d = {}
d['t1'] = pywintypes.Time(1)
self.failUnlessEqual(d['t1'], pywintypes.Time(1))
def testPyTimeCompare(self):
t1 = pywintypes.Time(100)
t1_2 = pywintypes.Time(100)
t2 = pywintypes.Time(101)
self.failUnlessEqual(t1, t1_2)
self.failUnless(t1 <= t1_2)
self.failUnless(t1_2 >= t1)
self.failIfEqual(t1, t2)
self.failUnless(t1 < t2)
self.failUnless(t2 > t1 )
def testPyTimeCompareOther(self):
t1 = pywintypes.Time(100)
t2 = None
self.failIfEqual(t1, t2)
def testTimeTuple(self):
now = datetime.datetime.now() # has usec...
# timetuple() lost usec - pt must be <=...
pt = pywintypes.Time(now.timetuple())
# *sob* - only if we have a datetime object can we compare like this.
if isinstance(pt, datetime.datetime):
self.failUnless(pt <= now)
def testTimeTuplems(self):
now = datetime.datetime.now() # has usec...
tt = now.timetuple() + (now.microsecond // 1000,)
pt = pywintypes.Time(tt)
# we can't compare if using the old type, as it loses all sub-second res.
if isinstance(pt, datetime.datetime):
self.failUnlessEqual(now, pt)
def testPyTimeFromTime(self):
t1 = pywintypes.Time(time.time())
self.failUnless(pywintypes.Time(t1) is t1)
def testGUID(self):
s = "{00020400-0000-0000-C000-000000000046}"
iid = pywintypes.IID(s)
iid2 = pywintypes.IID(ob2memory(iid), True)
self.assertEquals(iid, iid2)
self.assertRaises(ValueError, pywintypes.IID, str2bytes('00'), True) # too short
self.assertRaises(TypeError, pywintypes.IID, 0, True) # no buffer
def testGUIDRichCmp(self):
s = "{00020400-0000-0000-C000-000000000046}"
iid = pywintypes.IID(s)
self.failIf(s==None)
self.failIf(None==s)
self.failUnless(s!=None)
self.failUnless(None!=s)
if sys.version_info > (3,0):
self.assertRaises(TypeError, operator.gt, None, s)
self.assertRaises(TypeError, operator.gt, s, None)
self.assertRaises(TypeError, operator.lt, None, s)
self.assertRaises(TypeError, operator.lt, s, None)
def testGUIDInDict(self):
s = "{00020400-0000-0000-C000-000000000046}"
iid = pywintypes.IID(s)
d = dict(item=iid)
self.failUnlessEqual(d['item'], iid)
if __name__ == '__main__':
unittest.main()
|
tombstone/models | refs/heads/master | research/object_detection/builders/optimizer_builder.py | 1 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to build DetectionModel training optimizers."""
import tensorflow.compat.v1 as tf
from object_detection.utils import learning_schedules
try:
from tensorflow.contrib import opt as tf_opt # pylint: disable=g-import-not-at-top
except: # pylint: disable=bare-except
pass
def build_optimizers_tf_v1(optimizer_config, global_step=None):
"""Create a TF v1 compatible optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.RMSPropOptimizer(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.MomentumOptimizer(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
optimizer = tf_opt.MovingAverageOptimizer(
optimizer, average_decay=optimizer_config.moving_average_decay)
return optimizer, summary_vars
def build_optimizers_tf_v2(optimizer_config, global_step=None):
"""Create a TF v2 compatible optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
optimizer_type = optimizer_config.WhichOneof('optimizer')
optimizer = None
summary_vars = []
if optimizer_type == 'rms_prop_optimizer':
config = optimizer_config.rms_prop_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate,
decay=config.decay,
momentum=config.momentum_optimizer_value,
epsilon=config.epsilon)
if optimizer_type == 'momentum_optimizer':
config = optimizer_config.momentum_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.SGD(
learning_rate,
momentum=config.momentum_optimizer_value)
if optimizer_type == 'adam_optimizer':
config = optimizer_config.adam_optimizer
learning_rate = _create_learning_rate(config.learning_rate,
global_step=global_step)
summary_vars.append(learning_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=config.epsilon)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_type)
if optimizer_config.use_moving_average:
raise ValueError('Moving average not supported in eager mode.')
return optimizer, summary_vars
def build(config, global_step=None):
if tf.executing_eagerly():
return build_optimizers_tf_v2(config, global_step)
else:
return build_optimizers_tf_v1(config, global_step)
def _create_learning_rate(learning_rate_config, global_step=None):
"""Create optimizer learning rate based on config.
Args:
learning_rate_config: A LearningRate proto message.
global_step: A variable representing the current step.
If None, defaults to tf.train.get_or_create_global_step()
Returns:
A learning rate.
Raises:
ValueError: when using an unsupported input data type.
"""
if global_step is None:
global_step = tf.train.get_or_create_global_step()
learning_rate = None
learning_rate_type = learning_rate_config.WhichOneof('learning_rate')
if learning_rate_type == 'constant_learning_rate':
config = learning_rate_config.constant_learning_rate
learning_rate = tf.constant(config.learning_rate, dtype=tf.float32,
name='learning_rate')
if learning_rate_type == 'exponential_decay_learning_rate':
config = learning_rate_config.exponential_decay_learning_rate
learning_rate = learning_schedules.exponential_decay_with_burnin(
global_step,
config.initial_learning_rate,
config.decay_steps,
config.decay_factor,
burnin_learning_rate=config.burnin_learning_rate,
burnin_steps=config.burnin_steps,
min_learning_rate=config.min_learning_rate,
staircase=config.staircase)
if learning_rate_type == 'manual_step_learning_rate':
config = learning_rate_config.manual_step_learning_rate
if not config.schedule:
raise ValueError('Empty learning rate schedule.')
learning_rate_step_boundaries = [x.step for x in config.schedule]
learning_rate_sequence = [config.initial_learning_rate]
learning_rate_sequence += [x.learning_rate for x in config.schedule]
learning_rate = learning_schedules.manual_stepping(
global_step, learning_rate_step_boundaries,
learning_rate_sequence, config.warmup)
if learning_rate_type == 'cosine_decay_learning_rate':
config = learning_rate_config.cosine_decay_learning_rate
learning_rate = learning_schedules.cosine_decay_with_warmup(
global_step,
config.learning_rate_base,
config.total_steps,
config.warmup_learning_rate,
config.warmup_steps,
config.hold_base_rate_steps)
if learning_rate is None:
raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
return learning_rate
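# Usage sketch (added for illustration; not part of the upstream module). It
# builds an Adam optimizer from a text-format Optimizer proto, assuming the
# object_detection.protos definitions are importable. The proto literal below
# mirrors the style of the builder's unit tests, but treat the exact field
# names as an assumption for your installed version.
if __name__ == '__main__':
  from google.protobuf import text_format
  from object_detection.protos import optimizer_pb2

  optimizer_text_proto = """
    adam_optimizer: {
      epsilon: 1e-7
      learning_rate: {
        constant_learning_rate {
          learning_rate: 0.001
        }
      }
    }
    use_moving_average: false
  """
  optimizer_proto = optimizer_pb2.Optimizer()
  text_format.Merge(optimizer_text_proto, optimizer_proto)
  # Dispatches to the TF v1 or TF v2 builder depending on execution mode.
  opt, summary_vars = build(optimizer_proto)
  print(opt, summary_vars)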
|
fengliu2014/audfprint | refs/heads/master | profile_audfprint.py | 5 | import audfprint
import cProfile
import pstats
argv = ["audfprint", "new", "-d", "tmp.fpdb", "--density", "200", "Nine_Lives/01-Nine_Lives.mp3", "Nine_Lives/02-Falling_In_Love.mp3", "Nine_Lives/03-Hole_In_My_Soul.mp3", "Nine_Lives/04-Taste_Of_India.mp3", "Nine_Lives/05-Full_Circle.mp3", "Nine_Lives/06-Something_s_Gotta_Give.mp3", "Nine_Lives/07-Ain_t_That_A_Bitch.mp3", "Nine_Lives/08-The_Farm.mp3", "Nine_Lives/09-Crash.mp3", "Nine_Lives/10-Kiss_Your_Past_Good-bye.mp3", "Nine_Lives/11-Pink.mp3", "Nine_Lives/12-Attitude_Adjustment.mp3", "Nine_Lives/13-Fallen_Angels.mp3"]
cProfile.run('audfprint.main(argv)', 'fpstats')
p = pstats.Stats('fpstats')
p.sort_stats('time')
p.print_stats(10)
|
alu042/edx-platform | refs/heads/master | lms/djangoapps/django_comment_client/tests/unicode.py | 206 | # coding=utf-8
class UnicodeTestMixin(object):
def test_ascii(self):
self._test_unicode_data(u"This post contains ASCII.")
def test_latin_1(self):
self._test_unicode_data(u"Thís pøst çòñtáins Lätin-1 tæxt")
def test_CJK(self):
self._test_unicode_data(u"イんノ丂 アo丂イ co刀イムノ刀丂 cフズ")
def test_non_BMP(self):
self._test_unicode_data(u"𝕋𝕙𝕚𝕤 𝕡𝕠𝕤𝕥 𝕔𝕠𝕟𝕥𝕒𝕚𝕟𝕤 𝕔𝕙𝕒𝕣𝕒𝕔𝕥𝕖𝕣𝕤 𝕠𝕦𝕥𝕤𝕚𝕕𝕖 𝕥𝕙𝕖 𝔹𝕄ℙ")
def test_special_chars(self):
self._test_unicode_data(u"\" This , post > contains < delimiter ] and [ other } special { characters ; that & may ' break things")
def test_string_interp(self):
self._test_unicode_data(u"This post contains %s string interpolation #{syntax}")
|
neiudemo1/django | refs/heads/master | django/contrib/admindocs/tests/test_fields.py | 638 | from __future__ import unicode_literals
import unittest
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils.translation import ugettext as _
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
self.assertRaises(
AttributeError,
views.get_readable_field_data_type, "NotAField"
)
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
_('Boolean (Either True or False)')
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(CustomField()),
'A custom field type'
)
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
_('Field of type: %(field_type)s') % {
'field_type': 'DescriptionLackingField'
}
)
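# Added sketch: `description` may contain old-style format placeholders that
# get_readable_field_data_type interpolates against the field instance's
# attributes, which is how Django's own fields report things like max_length.
# The class below is hypothetical.
class SizedField(models.Field):
    description = "Sized data (up to %(max_length)s bytes)"

# views.get_readable_field_data_type(SizedField(max_length=64))
# would then render as 'Sized data (up to 64 bytes)'.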
|
germanponce/pos-addons | refs/heads/8.0 | pos_multi_session/__init__.py | 9 | import pos_multi_session_models
import controllers
|
mcloudv/fuel-ostf | refs/heads/master | fuel_health/tests/smoke/test_create_volume.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_health import nmanager
LOG = logging.getLogger(__name__)
class VolumesTest(nmanager.SmokeChecksTest):
@classmethod
def setUpClass(cls):
super(VolumesTest, cls).setUpClass()
if cls.manager.clients_initialized:
cls.micro_flavors = cls.find_micro_flavor()
def setUp(self):
super(VolumesTest, self).setUp()
self.check_clients_state()
if (not self.config.volume.cinder_node_exist
and not self.config.volume.ceph_exist):
self.skipTest('There are no cinder nodes or '
'ceph storage for volume')
if not self.config.compute.compute_nodes:
self.skipTest('There are no compute nodes')
self.check_image_exists()
@classmethod
def tearDownClass(cls):
super(VolumesTest, cls).tearDownClass()
def _wait_for_volume_status(self, volume, status):
self.status_timeout(self.volume_client.volumes, volume.id, status)
def _wait_for_instance_status(self, server, status):
self.status_timeout(self.compute_client.servers, server.id, status)
def test_volume_create(self):
"""Create volume and attach it to instance
Target component: Compute
Scenario:
1. Create a new small-size volume.
2. Wait for volume status to become "available".
3. Check volume has correct name.
4. Create new instance.
5. Wait for "Active" status
6. Attach volume to an instance.
7. Check volume status is "in use".
8. Get information on the created volume by its id.
9. Detach volume from the instance.
10. Check volume has "available" status.
11. Delete volume.
12. Verify that volume deleted
13. Delete server.
Duration: 350 s.
"""
msg_s1 = 'Volume was not created.'
# Create volume
volume = self.verify(120, self._create_volume, 1,
msg_s1,
"volume creation",
self.volume_client)
self.verify(200, self._wait_for_volume_status, 2,
msg_s1,
"volume becoming 'available'",
volume, 'available')
self.verify_response_true(
volume.display_name.startswith('ostf-test-volume'),
'Step 3 failed: {msg}'.format(msg=msg_s1))
# create instance
instance = self.verify(200, self._create_server, 4,
"Instance creation failed. ",
"server creation",
self.compute_client)
self.verify(200, self._wait_for_instance_status, 5,
'Instance status did not become "available".',
"instance becoming 'available'",
instance, 'ACTIVE')
# Attach volume
self.verify(120, self._attach_volume_to_instance, 6,
'Volume couldn`t be attached.',
'volume attachment',
volume, instance.id)
self.verify(180, self._wait_for_volume_status, 7,
'Attached volume status did not become "in-use".',
"volume becoming 'in-use'",
volume, 'in-use')
# get volume details
self.verify(20, self.volume_client.volumes.get, 8,
"Can not retrieve volume details. ",
"retrieving volume details", volume.id)
# detach volume
self.verify(50, self._detach_volume, 9,
'Can not detach volume. ',
"volume detachment",
instance.id, volume.id)
self.verify(120, self._wait_for_volume_status, 10,
'Volume status did not become "available".',
"volume becoming 'available'",
volume, 'available')
self.verify(50, self.volume_client.volumes.delete, 11,
'Can not delete volume. ',
"volume deletion",
volume)
self.verify(50, self.verify_volume_deletion, 12,
'Can not delete volume. ',
"volume deletion",
volume)
self.verify(30, self._delete_server, 13,
"Can not delete server. ",
"server deletion",
instance)
def test_create_boot_volume(self):
"""Create volume and boot instance from it
Target component: Compute
Scenario:
1. Create a new small-size volume from image.
2. Wait for volume status to become "available".
3. Launch instance from created volume.
4. Wait for "Active" status.
5. Delete instance.
6. Delete volume.
7. Verify that volume deleted
Duration: 350 s.
"""
fail_msg_step_1 = 'Volume was not created'
# Create volume
volume = self.verify(120, self._create_boot_volume, 1,
fail_msg_step_1,
"volume creation",
self.volume_client)
self.verify(200, self._wait_for_volume_status, 2,
fail_msg_step_1,
"volume becoming 'available'",
volume, 'available')
# create instance
instance = self.verify(200, self.create_instance_from_volume, 3,
"Instance creation failed. ",
"server creation",
self.compute_client, volume)
self.verify(200, self._wait_for_instance_status, 4,
'Instance status did not become "available".',
"instance becoming 'available'",
instance, 'ACTIVE')
self.verify(30, self._delete_server, 5,
"Can not delete server. ",
"server deletion",
instance)
self.verify(50, self.volume_client.volumes.delete, 6,
'Can not delete volume. ',
"volume deletion",
volume)
self.verify(50, self.verify_volume_deletion, 7,
'Can not delete volume. ',
"volume deletion",
volume)
|
SteveHNH/ansible | refs/heads/devel | lib/ansible/modules/cloud/misc/rhevm.py | 17 | #!/usr/bin/python
# (c) 2016, Timothy Vandenbrande <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rhevm
author: Timothy Vandenbrande
short_description: RHEV/oVirt automation
description:
- This module only supports oVirt/RHEV version 3. A newer module M(ovirt_vms) supports oVirt/RHV version 4.
    - Allows you to create/remove/update or power-manage virtual machines on a RHEV/oVirt platform.
version_added: "2.2"
requirements:
- ovirtsdk
options:
user:
description:
- The user to authenticate with.
default: "admin@internal"
required: false
server:
description:
- The name/ip of your RHEV-m/oVirt instance.
default: "127.0.0.1"
required: false
port:
description:
            - The port on which the API is reachable.
default: "443"
required: false
insecure_api:
description:
- A boolean switch to make a secure or insecure connection to the server.
default: false
required: false
name:
description:
- The name of the VM.
cluster:
description:
            - The rhev/ovirt cluster in which you want your VM to start.
required: false
datacenter:
description:
            - The rhev/ovirt datacenter in which you want your VM to start.
required: false
default: "Default"
state:
description:
            - This serves to create/remove/update or power-manage your VM.
default: "present"
required: false
choices: ['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']
image:
description:
- The template to use for the VM.
default: null
required: false
type:
description:
- To define if the VM is a server or desktop.
default: server
required: false
choices: [ 'server', 'desktop', 'host' ]
vmhost:
description:
- The host you wish your VM to run on.
required: false
vmcpu:
description:
- The number of CPUs you want in your VM.
default: "2"
required: false
cpu_share:
description:
- This parameter is used to configure the cpu share.
default: "0"
required: false
vmmem:
description:
- The amount of memory you want your VM to use (in GB).
default: "1"
required: false
osver:
description:
            - The operating system option in RHEV/oVirt.
default: "rhel_6x64"
required: false
mempol:
description:
- The minimum amount of memory you wish to reserve for this system.
default: "1"
required: false
vm_ha:
description:
            - To make your VM highly available.
default: true
required: false
disks:
description:
- This option uses complex arguments and is a list of disks with the options name, size and domain.
required: false
ifaces:
description:
- This option uses complex arguments and is a list of interfaces with the options name and vlan.
aliases: ['nics', 'interfaces']
required: false
boot_order:
description:
- This option uses complex arguments and is a list of items that specify the bootorder.
default: ["network","hd"]
required: false
del_prot:
description:
- This option sets the delete protection checkbox.
default: true
required: false
cd_drive:
description:
            - The CD you wish to have mounted on the VM when I(state = 'cd').
default: null
required: false
timeout:
description:
- The timeout you wish to define for power actions.
- When I(state = 'up')
- When I(state = 'down')
- When I(state = 'restarted')
default: null
required: false
'''
RETURN = '''
vm:
    description: Returns all of the VM's variables and execution.
returned: always
type: dict
sample: '{
"boot_order": [
"hd",
"network"
],
"changed": true,
"changes": [
"Delete Protection"
],
"cluster": "C1",
"cpu_share": "0",
"created": false,
"datacenter": "Default",
"del_prot": true,
"disks": [
{
"domain": "ssd-san",
"name": "OS",
"size": 40
}
],
"eth0": "00:00:5E:00:53:00",
"eth1": "00:00:5E:00:53:01",
"eth2": "00:00:5E:00:53:02",
"exists": true,
"failed": false,
"ifaces": [
{
"name": "eth0",
"vlan": "Management"
},
{
"name": "eth1",
"vlan": "Internal"
},
{
"name": "eth2",
"vlan": "External"
}
],
"image": false,
"mempol": "0",
"msg": [
"VM exists",
"cpu_share was already set to 0",
"VM high availability was already set to True",
"The boot order has already been set",
"VM delete protection has been set to True",
"Disk web2_Disk0_OS already exists",
"The VM starting host was already set to host416"
],
"name": "web2",
"type": "server",
"uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
"vm_ha": true,
"vmcpu": "4",
"vmhost": "host416",
"vmmem": "16"
}'
'''
EXAMPLES = '''
# basic get info from VM
- rhevm:
name: "demo"
user: "{{ rhev.admin.name }}"
password: "{{ rhev.admin.pass }}"
server: "rhevm01"
state: "info"
# basic create example from image
- rhevm:
name: "demo"
user: "{{ rhev.admin.name }}"
password: "{{ rhev.admin.pass }}"
server: "rhevm01"
state: "present"
image: "centos7_x64"
cluster: "centos"
# power management
- rhevm:
name: "uptime_server"
user: "{{ rhev.admin.name }}"
password: "{{ rhev.admin.pass }}"
server: "rhevm01"
cluster: "RH"
state: "down"
image: "centos7_x64"
cluster: "centos"
# multi disk, multi nic create example
- rhevm:
name: "server007"
user: "{{ rhev.admin.name }}"
password: "{{ rhev.admin.pass }}"
server: "rhevm01"
cluster: "RH"
state: "present"
type: "server"
vmcpu: 4
vmmem: 2
ifaces:
- name: "eth0"
vlan: "vlan2202"
- name: "eth1"
vlan: "vlan36"
- name: "eth2"
vlan: "vlan38"
- name: "eth3"
vlan: "vlan2202"
disks:
- name: "root"
size: 10
domain: "ssd-san"
- name: "swap"
size: 10
domain: "15kiscsi-san"
- name: "opt"
size: 10
domain: "15kiscsi-san"
- name: "var"
size: 10
domain: "10kiscsi-san"
- name: "home"
size: 10
domain: "sata-san"
boot_order:
- "network"
- "hd"
# add a CD to the disk cd_drive
- rhevm:
name: 'server007'
user: "{{ rhev.admin.name }}"
password: "{{ rhev.admin.pass }}"
state: 'cd'
cd_drive: 'rhev-tools-setup.iso'
# new host deployment + host network configuration
- rhevm:
name: "ovirt_node007"
password: "{{ rhevm.admin.pass }}"
type: "host"
state: present
cluster: "rhevm01"
ifaces:
- name: em1
- name: em2
- name: p3p1
ip: '172.31.224.200'
netmask: '255.255.254.0'
- name: p3p2
ip: '172.31.225.200'
netmask: '255.255.254.0'
- name: bond0
bond:
- em1
- em2
network: 'rhevm'
ip: '172.31.222.200'
netmask: '255.255.255.0'
management: True
- name: bond0.36
network: 'vlan36'
ip: '10.2.36.200'
netmask: '255.255.254.0'
gateway: '10.2.36.254'
- name: bond0.2202
network: 'vlan2202'
- name: bond0.38
network: 'vlan38'
'''
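# Added illustration (hypothetical playbook snippet, not one of the upstream
# examples above): the `vm` dict documented in RETURN can be captured with
# register and inspected afterwards.
#
# - rhevm:
#     name: "demo"
#     user: "{{ rhev.admin.name }}"
#     password: "{{ rhev.admin.pass }}"
#     server: "rhevm01"
#     state: "info"
#   register: rhev_vm
#
# - debug:
#     var: rhev_vm.vm.status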
import time
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
HAS_SDK = True
except ImportError:
HAS_SDK = False
from ansible.module_utils.basic import AnsibleModule
RHEV_FAILED = 1
RHEV_SUCCESS = 0
RHEV_UNAVAILABLE = 2
RHEV_TYPE_OPTS = ['server', 'desktop', 'host']
STATE_OPTS = ['ping', 'present', 'absent', 'up', 'down', 'restart', 'cd', 'info']
global msg, changed, failed
msg = []
changed = False
failed = False
class RHEVConn(object):
'Connection to RHEV-M'
def __init__(self, module):
self.module = module
user = module.params.get('user')
password = module.params.get('password')
server = module.params.get('server')
port = module.params.get('port')
insecure_api = module.params.get('insecure_api')
url = "https://%s:%s" % (server, port)
try:
api = API(url=url, username=user, password=password, insecure=str(insecure_api))
api.test()
self.conn = api
except:
raise Exception("Failed to connect to RHEV-M.")
def __del__(self):
self.conn.disconnect()
def createVMimage(self, name, cluster, template):
try:
vmparams = params.VM(
name=name,
cluster=self.conn.clusters.get(name=cluster),
template=self.conn.templates.get(name=template),
disks=params.Disks(clone=True)
)
self.conn.vms.add(vmparams)
setMsg("VM is created")
setChanged()
return True
except Exception as e:
setMsg("Failed to create VM")
setMsg(str(e))
setFailed()
return False
def createVM(self, name, cluster, os, actiontype):
try:
vmparams = params.VM(
name=name,
cluster=self.conn.clusters.get(name=cluster),
os=params.OperatingSystem(type_=os),
template=self.conn.templates.get(name="Blank"),
type_=actiontype
)
self.conn.vms.add(vmparams)
setMsg("VM is created")
setChanged()
return True
except Exception as e:
setMsg("Failed to create VM")
setMsg(str(e))
setFailed()
return False
def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
VM = self.get_VM(vmname)
newdisk = params.Disk(
name=diskname,
size=1024 * 1024 * 1024 * int(disksize),
wipe_after_delete=True,
sparse=diskallocationtype,
interface=diskinterface,
format=diskformat,
bootable=diskboot,
storage_domains=params.StorageDomains(
storage_domain=[self.get_domain(diskdomain)]
)
)
try:
VM.disks.add(newdisk)
VM.update()
setMsg("Successfully added disk " + diskname)
setChanged()
except Exception as e:
setFailed()
setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
setMsg(str(e))
return False
try:
currentdisk = VM.disks.get(name=diskname)
attempt = 1
while currentdisk.status.state != 'ok':
currentdisk = VM.disks.get(name=diskname)
if attempt == 100:
setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
raise Exception()
else:
attempt += 1
time.sleep(2)
setMsg("The disk " + diskname + " is ready.")
except Exception as e:
setFailed()
setMsg("Error getting the state of " + diskname + ".")
setMsg(str(e))
return False
return True
def createNIC(self, vmname, nicname, vlan, interface):
VM = self.get_VM(vmname)
CLUSTER = self.get_cluster_byid(VM.cluster.id)
DC = self.get_DC_byid(CLUSTER.data_center.id)
newnic = params.NIC(
name=nicname,
network=DC.networks.get(name=vlan),
interface=interface
)
try:
VM.nics.add(newnic)
VM.update()
setMsg("Successfully added iface " + nicname)
setChanged()
except Exception as e:
setFailed()
setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
setMsg(str(e))
return False
try:
currentnic = VM.nics.get(name=nicname)
attempt = 1
while currentnic.active is not True:
currentnic = VM.nics.get(name=nicname)
if attempt == 100:
setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
raise Exception()
else:
attempt += 1
time.sleep(2)
setMsg("The iface " + nicname + " is ready.")
except Exception as e:
setFailed()
setMsg("Error getting the state of " + nicname + ".")
setMsg(str(e))
return False
return True
def get_DC(self, dc_name):
return self.conn.datacenters.get(name=dc_name)
def get_DC_byid(self, dc_id):
return self.conn.datacenters.get(id=dc_id)
def get_VM(self, vm_name):
return self.conn.vms.get(name=vm_name)
def get_cluster_byid(self, cluster_id):
return self.conn.clusters.get(id=cluster_id)
def get_cluster(self, cluster_name):
return self.conn.clusters.get(name=cluster_name)
def get_domain_byid(self, dom_id):
return self.conn.storagedomains.get(id=dom_id)
def get_domain(self, domain_name):
return self.conn.storagedomains.get(name=domain_name)
def get_disk(self, disk):
return self.conn.disks.get(disk)
def get_network(self, dc_name, network_name):
return self.get_DC(dc_name).networks.get(network_name)
def get_network_byid(self, network_id):
return self.conn.networks.get(id=network_id)
def get_NIC(self, vm_name, nic_name):
return self.get_VM(vm_name).nics.get(nic_name)
def get_Host(self, host_name):
return self.conn.hosts.get(name=host_name)
def get_Host_byid(self, host_id):
return self.conn.hosts.get(id=host_id)
def set_Memory(self, name, memory):
VM = self.get_VM(name)
VM.memory = int(int(memory) * 1024 * 1024 * 1024)
try:
VM.update()
setMsg("The Memory has been updated.")
setChanged()
return True
except Exception as e:
setMsg("Failed to update memory.")
setMsg(str(e))
setFailed()
return False
def set_Memory_Policy(self, name, memory_policy):
VM = self.get_VM(name)
VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
try:
VM.update()
setMsg("The memory policy has been updated.")
setChanged()
return True
except Exception as e:
setMsg("Failed to update memory policy.")
setMsg(str(e))
setFailed()
return False
def set_CPU(self, name, cpu):
VM = self.get_VM(name)
VM.cpu.topology.cores = int(cpu)
try:
VM.update()
setMsg("The number of CPUs has been updated.")
setChanged()
return True
except Exception as e:
setMsg("Failed to update the number of CPUs.")
setMsg(str(e))
setFailed()
return False
def set_CPU_share(self, name, cpu_share):
VM = self.get_VM(name)
VM.cpu_shares = int(cpu_share)
try:
VM.update()
setMsg("The CPU share has been updated.")
setChanged()
return True
except Exception as e:
setMsg("Failed to update the CPU share.")
setMsg(str(e))
setFailed()
return False
def set_Disk(self, diskname, disksize, diskinterface, diskboot):
DISK = self.get_disk(diskname)
setMsg("Checking disk " + diskname)
if DISK.get_bootable() != diskboot:
try:
DISK.set_bootable(diskboot)
setMsg("Updated the boot option on the disk.")
setChanged()
except Exception as e:
setMsg("Failed to set the boot option on the disk.")
setMsg(str(e))
setFailed()
return False
else:
setMsg("The boot option of the disk is correct")
if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
try:
DISK.size = (1024 * 1024 * 1024 * int(disksize))
setMsg("Updated the size of the disk.")
setChanged()
except Exception as e:
setMsg("Failed to update the size of the disk.")
setMsg(str(e))
setFailed()
return False
        elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
            setMsg("Shrinking disks is not supported")
            setFailed()
            return False
else:
setMsg("The size of the disk is correct")
if str(DISK.interface) != str(diskinterface):
try:
DISK.interface = diskinterface
setMsg("Updated the interface of the disk.")
setChanged()
except Exception as e:
setMsg("Failed to update the interface of the disk.")
setMsg(str(e))
setFailed()
return False
else:
setMsg("The interface of the disk is correct")
return True
def set_NIC(self, vmname, nicname, newname, vlan, interface):
NIC = self.get_NIC(vmname, nicname)
VM = self.get_VM(vmname)
CLUSTER = self.get_cluster_byid(VM.cluster.id)
DC = self.get_DC_byid(CLUSTER.data_center.id)
NETWORK = self.get_network(str(DC.name), vlan)
checkFail()
if NIC.name != newname:
NIC.name = newname
setMsg('Updating iface name to ' + newname)
setChanged()
if str(NIC.network.id) != str(NETWORK.id):
NIC.set_network(NETWORK)
setMsg('Updating iface network to ' + vlan)
setChanged()
if NIC.interface != interface:
NIC.interface = interface
setMsg('Updating iface interface to ' + interface)
setChanged()
try:
NIC.update()
setMsg('iface has successfully been updated.')
except Exception as e:
setMsg("Failed to update the iface.")
setMsg(str(e))
setFailed()
return False
return True
def set_DeleteProtection(self, vmname, del_prot):
VM = self.get_VM(vmname)
VM.delete_protected = del_prot
try:
VM.update()
setChanged()
except Exception as e:
setMsg("Failed to update delete protection.")
setMsg(str(e))
setFailed()
return False
return True
def set_BootOrder(self, vmname, boot_order):
VM = self.get_VM(vmname)
bootorder = []
for device in boot_order:
bootorder.append(params.Boot(dev=device))
VM.os.boot = bootorder
try:
VM.update()
setChanged()
except Exception as e:
setMsg("Failed to update the boot order.")
setMsg(str(e))
setFailed()
return False
return True
def set_Host(self, host_name, cluster, ifaces):
HOST = self.get_Host(host_name)
CLUSTER = self.get_cluster(cluster)
if HOST is None:
setMsg("Host does not exist.")
ifacelist = dict()
networklist = []
manageip = ''
try:
for iface in ifaces:
try:
setMsg('creating host interface ' + iface['name'])
if 'management' in iface:
manageip = iface['ip']
if 'boot_protocol' not in iface:
if 'ip' in iface:
iface['boot_protocol'] = 'static'
else:
iface['boot_protocol'] = 'none'
if 'ip' not in iface:
iface['ip'] = ''
if 'netmask' not in iface:
iface['netmask'] = ''
if 'gateway' not in iface:
iface['gateway'] = ''
if 'network' in iface:
if 'bond' in iface:
bond = []
for slave in iface['bond']:
bond.append(ifacelist[slave])
try:
tmpiface = params.Bonding(
slaves = params.Slaves(host_nic = bond),
options = params.Options(
option = [
params.Option(name = 'miimon', value = '100'),
params.Option(name = 'mode', value = '4')
]
)
)
except Exception as e:
setMsg('Failed to create the bond for ' + iface['name'])
setFailed()
setMsg(str(e))
return False
try:
tmpnetwork = params.HostNIC(
network = params.Network(name = iface['network']),
name = iface['name'],
boot_protocol = iface['boot_protocol'],
ip = params.IP(
address = iface['ip'],
netmask = iface['netmask'],
gateway = iface['gateway']
),
override_configuration = True,
bonding = tmpiface)
networklist.append(tmpnetwork)
setMsg('Applying network ' + iface['name'])
except Exception as e:
                                    setMsg('Failed to set ' + iface['name'] + ' as network interface')
setFailed()
setMsg(str(e))
return False
else:
tmpnetwork = params.HostNIC(
network = params.Network(name = iface['network']),
name = iface['name'],
boot_protocol = iface['boot_protocol'],
ip = params.IP(
address = iface['ip'],
netmask = iface['netmask'],
gateway = iface['gateway']
))
networklist.append(tmpnetwork)
setMsg('Applying network ' + iface['name'])
else:
tmpiface = params.HostNIC(
name=iface['name'],
network=params.Network(),
boot_protocol=iface['boot_protocol'],
ip=params.IP(
address=iface['ip'],
netmask=iface['netmask'],
gateway=iface['gateway']
))
ifacelist[iface['name']] = tmpiface
except Exception as e:
setMsg('Failed to set ' + iface['name'])
setFailed()
setMsg(str(e))
return False
except Exception as e:
setMsg('Failed to set networks')
setMsg(str(e))
setFailed()
return False
if manageip == '':
setMsg('No management network is defined')
setFailed()
return False
try:
HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
if self.conn.hosts.add(HOST):
setChanged()
HOST = self.get_Host(host_name)
state = HOST.status.state
while (state != 'non_operational' and state != 'up'):
HOST = self.get_Host(host_name)
state = HOST.status.state
time.sleep(1)
if state == 'non_responsive':
setMsg('Failed to add host to RHEVM')
setFailed()
return False
setMsg('status host: up')
time.sleep(5)
HOST = self.get_Host(host_name)
state = HOST.status.state
setMsg('State before setting to maintenance: ' + str(state))
HOST.deactivate()
while state != 'maintenance':
HOST = self.get_Host(host_name)
state = HOST.status.state
time.sleep(1)
setMsg('status host: maintenance')
try:
HOST.nics.setupnetworks(params.Action(
force=True,
check_connectivity = False,
host_nics = params.HostNics(host_nic = networklist)
))
setMsg('nics are set')
except Exception as e:
setMsg('Failed to apply networkconfig')
setFailed()
setMsg(str(e))
return False
try:
HOST.commitnetconfig()
setMsg('Network config is saved')
except Exception as e:
setMsg('Failed to save networkconfig')
setFailed()
setMsg(str(e))
return False
except Exception as e:
if 'The Host name is already in use' in str(e):
setMsg("Host already exists")
else:
setMsg("Failed to add host")
setFailed()
setMsg(str(e))
return False
HOST.activate()
while state != 'up':
HOST = self.get_Host(host_name)
state = HOST.status.state
time.sleep(1)
if state == 'non_responsive':
setMsg('Failed to apply networkconfig.')
setFailed()
return False
setMsg('status host: up')
else:
setMsg("Host exists.")
return True
def del_NIC(self, vmname, nicname):
return self.get_NIC(vmname, nicname).delete()
def remove_VM(self, vmname):
VM = self.get_VM(vmname)
try:
VM.delete()
except Exception as e:
setMsg("Failed to remove VM.")
setMsg(str(e))
setFailed()
return False
return True
def start_VM(self, vmname, timeout):
VM = self.get_VM(vmname)
try:
VM.start()
except Exception as e:
setMsg("Failed to start VM.")
setMsg(str(e))
setFailed()
return False
return self.wait_VM(vmname, "up", timeout)
def wait_VM(self, vmname, state, timeout):
VM = self.get_VM(vmname)
while VM.status.state != state:
VM = self.get_VM(vmname)
time.sleep(10)
if timeout is not False:
timeout -= 10
if timeout <= 0:
setMsg("Timeout expired")
setFailed()
return False
return True
def stop_VM(self, vmname, timeout):
VM = self.get_VM(vmname)
try:
VM.stop()
except Exception as e:
setMsg("Failed to stop VM.")
setMsg(str(e))
setFailed()
return False
return self.wait_VM(vmname, "down", timeout)
def set_CD(self, vmname, cd_drive):
VM = self.get_VM(vmname)
try:
if str(VM.status.state) == 'down':
cdrom = params.CdRom(file=cd_drive)
VM.cdroms.add(cdrom)
setMsg("Attached the image.")
setChanged()
else:
cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
cdrom.set_file(cd_drive)
cdrom.update(current=True)
setMsg("Attached the image.")
setChanged()
except Exception as e:
setMsg("Failed to attach image.")
setMsg(str(e))
setFailed()
return False
return True
def set_VM_Host(self, vmname, vmhost):
VM = self.get_VM(vmname)
HOST = self.get_Host(vmhost)
try:
VM.placement_policy.host = HOST
VM.update()
setMsg("Set startup host to " + vmhost)
setChanged()
except Exception as e:
setMsg("Failed to set startup host.")
setMsg(str(e))
setFailed()
return False
return True
def migrate_VM(self, vmname, vmhost):
VM = self.get_VM(vmname)
HOST = self.get_Host_byid(VM.host.id)
if str(HOST.name) != vmhost:
try:
VM.migrate(
action=params.Action(
host=params.Host(
name=vmhost,
)
),
)
setChanged()
setMsg("VM migrated to " + vmhost)
except Exception as e:
setMsg("Failed to set startup host.")
setMsg(str(e))
setFailed()
return False
return True
def remove_CD(self, vmname):
VM = self.get_VM(vmname)
try:
VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
setMsg("Removed the image.")
setChanged()
except Exception as e:
setMsg("Failed to remove the image.")
setMsg(str(e))
setFailed()
return False
return True
class RHEV(object):
def __init__(self, module):
self.module = module
def __get_conn(self):
self.conn = RHEVConn(self.module)
return self.conn
def test(self):
self.__get_conn()
return "OK"
def getVM(self, name):
self.__get_conn()
VM = self.conn.get_VM(name)
if VM:
vminfo = dict()
vminfo['uuid'] = VM.id
vminfo['name'] = VM.name
vminfo['status'] = VM.status.state
vminfo['cpu_cores'] = VM.cpu.topology.cores
vminfo['cpu_sockets'] = VM.cpu.topology.sockets
vminfo['cpu_shares'] = VM.cpu_shares
vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
vminfo['os'] = VM.get_os().type_
vminfo['del_prot'] = VM.delete_protected
try:
vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
except Exception:
vminfo['host'] = None
vminfo['boot_order'] = []
for boot_dev in VM.os.get_boot():
vminfo['boot_order'].append(str(boot_dev.dev))
vminfo['disks'] = []
for DISK in VM.disks.list():
disk = dict()
disk['name'] = DISK.name
disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
disk['interface'] = DISK.interface
vminfo['disks'].append(disk)
vminfo['ifaces'] = []
for NIC in VM.nics.list():
iface = dict()
iface['name'] = str(NIC.name)
iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
iface['interface'] = NIC.interface
iface['mac'] = NIC.mac.address
vminfo['ifaces'].append(iface)
vminfo[str(NIC.name)] = NIC.mac.address
CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
if CLUSTER:
vminfo['cluster'] = CLUSTER.name
else:
vminfo = False
return vminfo
def createVMimage(self, name, cluster, template, disks):
self.__get_conn()
        # RHEVConn.createVMimage only accepts (name, cluster, template);
        # disks are applied afterwards via setDisks().
        return self.conn.createVMimage(name, cluster, template)
def createVM(self, name, cluster, os, actiontype):
self.__get_conn()
return self.conn.createVM(name, cluster, os, actiontype)
def setMemory(self, name, memory):
self.__get_conn()
return self.conn.set_Memory(name, memory)
def setMemoryPolicy(self, name, memory_policy):
self.__get_conn()
return self.conn.set_Memory_Policy(name, memory_policy)
def setCPU(self, name, cpu):
self.__get_conn()
return self.conn.set_CPU(name, cpu)
def setCPUShare(self, name, cpu_share):
self.__get_conn()
return self.conn.set_CPU_share(name, cpu_share)
def setDisks(self, name, disks):
self.__get_conn()
counter = 0
bootselect = False
for disk in disks:
if 'bootable' in disk:
if disk['bootable'] is True:
bootselect = True
for disk in disks:
diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
disksize = disk.get('size', 1)
diskdomain = disk.get('domain', None)
if diskdomain is None:
setMsg("`domain` is a required disk key.")
setFailed()
return False
diskinterface = disk.get('interface', 'virtio')
diskformat = disk.get('format', 'raw')
diskallocationtype = disk.get('thin', False)
diskboot = disk.get('bootable', False)
if bootselect is False and counter == 0:
diskboot = True
DISK = self.conn.get_disk(diskname)
if DISK is None:
self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
else:
self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
checkFail()
counter += 1
return True
def setNetworks(self, vmname, ifaces):
self.__get_conn()
VM = self.conn.get_VM(vmname)
counter = 0
length = len(ifaces)
for NIC in VM.nics.list():
if counter < length:
iface = ifaces[counter]
name = iface.get('name', None)
if name is None:
setMsg("`name` is a required iface key.")
setFailed()
elif str(name) != str(NIC.name):
setMsg("ifaces are in the wrong order, rebuilding everything.")
for NIC in VM.nics.list():
self.conn.del_NIC(vmname, NIC.name)
self.setNetworks(vmname, ifaces)
checkFail()
return True
vlan = iface.get('vlan', None)
if vlan is None:
setMsg("`vlan` is a required iface key.")
setFailed()
checkFail()
interface = iface.get('interface', 'virtio')
self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
else:
self.conn.del_NIC(vmname, NIC.name)
counter += 1
checkFail()
while counter < length:
iface = ifaces[counter]
name = iface.get('name', None)
if name is None:
setMsg("`name` is a required iface key.")
setFailed()
vlan = iface.get('vlan', None)
if vlan is None:
setMsg("`vlan` is a required iface key.")
setFailed()
if failed is True:
return False
interface = iface.get('interface', 'virtio')
self.conn.createNIC(vmname, name, vlan, interface)
counter += 1
checkFail()
return True
def setDeleteProtection(self, vmname, del_prot):
self.__get_conn()
VM = self.conn.get_VM(vmname)
if bool(VM.delete_protected) != bool(del_prot):
self.conn.set_DeleteProtection(vmname, del_prot)
checkFail()
setMsg("`delete protection` has been updated.")
else:
setMsg("`delete protection` already has the right value.")
return True
def setBootOrder(self, vmname, boot_order):
self.__get_conn()
VM = self.conn.get_VM(vmname)
bootorder = []
for boot_dev in VM.os.get_boot():
bootorder.append(str(boot_dev.dev))
if boot_order != bootorder:
self.conn.set_BootOrder(vmname, boot_order)
setMsg('The boot order has been set')
else:
setMsg('The boot order has already been set')
return True
def removeVM(self, vmname):
self.__get_conn()
self.setPower(vmname, "down", 300)
return self.conn.remove_VM(vmname)
def setPower(self, vmname, state, timeout):
self.__get_conn()
VM = self.conn.get_VM(vmname)
if VM is None:
setMsg("VM does not exist.")
setFailed()
return False
if state == VM.status.state:
setMsg("VM state was already " + state)
else:
if state == "up":
setMsg("VM is going to start")
self.conn.start_VM(vmname, timeout)
setChanged()
elif state == "down":
setMsg("VM is going to stop")
self.conn.stop_VM(vmname, timeout)
setChanged()
elif state == "restarted":
self.setPower(vmname, "down", timeout)
checkFail()
self.setPower(vmname, "up", timeout)
checkFail()
setMsg("the vm state is set to " + state)
return True
def setCD(self, vmname, cd_drive):
self.__get_conn()
if cd_drive:
return self.conn.set_CD(vmname, cd_drive)
else:
return self.conn.remove_CD(vmname)
def setVMHost(self, vmname, vmhost):
self.__get_conn()
return self.conn.set_VM_Host(vmname, vmhost)
# pylint: disable=unreachable
VM = self.conn.get_VM(vmname)
HOST = self.conn.get_Host(vmhost)
if VM.placement_policy.host is None:
self.conn.set_VM_Host(vmname, vmhost)
elif str(VM.placement_policy.host.id) != str(HOST.id):
self.conn.set_VM_Host(vmname, vmhost)
else:
setMsg("VM's startup host was already set to " + vmhost)
checkFail()
if str(VM.status.state) == "up":
self.conn.migrate_VM(vmname, vmhost)
checkFail()
return True
def setHost(self, hostname, cluster, ifaces):
self.__get_conn()
return self.conn.set_Host(hostname, cluster, ifaces)
def checkFail():
if failed:
module.fail_json(msg=msg)
else:
return True
def setFailed():
global failed
failed = True
def setChanged():
global changed
changed = True
def setMsg(message):
    msg.append(message)
def core(module):
r = RHEV(module)
state = module.params.get('state', 'present')
if state == 'ping':
r.test()
return RHEV_SUCCESS, {"ping": "pong"}
elif state == 'info':
name = module.params.get('name')
if not name:
setMsg("`name` is a required argument.")
return RHEV_FAILED, msg
vminfo = r.getVM(name)
return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
elif state == 'present':
created = False
name = module.params.get('name')
if not name:
setMsg("`name` is a required argument.")
return RHEV_FAILED, msg
actiontype = module.params.get('type')
if actiontype == 'server' or actiontype == 'desktop':
vminfo = r.getVM(name)
if vminfo:
setMsg('VM exists')
else:
# Create VM
cluster = module.params.get('cluster')
if cluster is None:
setMsg("cluster is a required argument.")
setFailed()
template = module.params.get('image')
if template:
disks = module.params.get('disks')
if disks is None:
setMsg("disks is a required argument.")
setFailed()
checkFail()
if r.createVMimage(name, cluster, template, disks) is False:
return RHEV_FAILED, vminfo
else:
os = module.params.get('osver')
if os is None:
setMsg("osver is a required argument.")
setFailed()
checkFail()
if r.createVM(name, cluster, os, actiontype) is False:
return RHEV_FAILED, vminfo
created = True
# Set MEMORY and MEMORY POLICY
vminfo = r.getVM(name)
memory = module.params.get('vmmem')
if memory is not None:
memory_policy = module.params.get('mempol')
if int(memory_policy) == 0:
memory_policy = memory
mem_pol_nok = True
if int(vminfo['mem_pol']) == int(memory_policy):
setMsg("Memory is correct")
mem_pol_nok = False
mem_nok = True
if int(vminfo['memory']) == int(memory):
setMsg("Memory is correct")
mem_nok = False
                if int(memory_policy) > int(memory):
setMsg('memory_policy cannot have a higher value than memory.')
return RHEV_FAILED, msg
if mem_nok and mem_pol_nok:
if int(memory_policy) > int(vminfo['memory']):
r.setMemory(vminfo['name'], memory)
r.setMemoryPolicy(vminfo['name'], memory_policy)
else:
r.setMemoryPolicy(vminfo['name'], memory_policy)
r.setMemory(vminfo['name'], memory)
elif mem_nok:
r.setMemory(vminfo['name'], memory)
elif mem_pol_nok:
r.setMemoryPolicy(vminfo['name'], memory_policy)
checkFail()
# Set CPU
cpu = module.params.get('vmcpu')
if int(vminfo['cpu_cores']) == int(cpu):
setMsg("Number of CPUs is correct")
else:
if r.setCPU(vminfo['name'], cpu) is False:
return RHEV_FAILED, msg
# Set CPU SHARE
cpu_share = module.params.get('cpu_share')
if cpu_share is not None:
if int(vminfo['cpu_shares']) == int(cpu_share):
setMsg("CPU share is correct.")
else:
if r.setCPUShare(vminfo['name'], cpu_share) is False:
return RHEV_FAILED, msg
# Set DISKS
disks = module.params.get('disks')
if disks is not None:
if r.setDisks(vminfo['name'], disks) is False:
return RHEV_FAILED, msg
# Set NETWORKS
ifaces = module.params.get('ifaces', None)
if ifaces is not None:
if r.setNetworks(vminfo['name'], ifaces) is False:
return RHEV_FAILED, msg
# Set Delete Protection
del_prot = module.params.get('del_prot')
if r.setDeleteProtection(vminfo['name'], del_prot) is False:
return RHEV_FAILED, msg
# Set Boot Order
boot_order = module.params.get('boot_order')
if r.setBootOrder(vminfo['name'], boot_order) is False:
return RHEV_FAILED, msg
# Set VM Host
vmhost = module.params.get('vmhost')
            if vmhost is not False and vmhost != "False":
if r.setVMHost(vminfo['name'], vmhost) is False:
return RHEV_FAILED, msg
vminfo = r.getVM(name)
vminfo['created'] = created
return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
if actiontype == 'host':
cluster = module.params.get('cluster')
if cluster is None:
setMsg("cluster is a required argument.")
setFailed()
ifaces = module.params.get('ifaces')
if ifaces is None:
setMsg("ifaces is a required argument.")
setFailed()
if r.setHost(name, cluster, ifaces) is False:
return RHEV_FAILED, msg
return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
elif state == 'absent':
name = module.params.get('name')
if not name:
setMsg("`name` is a required argument.")
return RHEV_FAILED, msg
actiontype = module.params.get('type')
if actiontype == 'server' or actiontype == 'desktop':
vminfo = r.getVM(name)
if vminfo:
setMsg('VM exists')
# Set Delete Protection
del_prot = module.params.get('del_prot')
if r.setDeleteProtection(vminfo['name'], del_prot) is False:
return RHEV_FAILED, msg
# Remove VM
if r.removeVM(vminfo['name']) is False:
return RHEV_FAILED, msg
setMsg('VM has been removed.')
vminfo['state'] = 'DELETED'
else:
setMsg('VM was already removed.')
return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
elif state == 'up' or state == 'down' or state == 'restarted':
name = module.params.get('name')
if not name:
setMsg("`name` is a required argument.")
return RHEV_FAILED, msg
timeout = module.params.get('timeout')
if r.setPower(name, state, timeout) is False:
return RHEV_FAILED, msg
vminfo = r.getVM(name)
return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
elif state == 'cd':
name = module.params.get('name')
cd_drive = module.params.get('cd_drive')
if r.setCD(name, cd_drive) is False:
return RHEV_FAILED, msg
return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
def main():
global module
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
user = dict(default="admin@internal"),
password = dict(required=True, no_log=True),
server = dict(default="127.0.0.1"),
port = dict(default="443"),
insecure_api = dict(default=False, type='bool'),
name = dict(),
image = dict(default=False),
datacenter = dict(default="Default"),
type = dict(default="server", choices=['server', 'desktop', 'host']),
cluster = dict(default=''),
vmhost = dict(default=False),
vmcpu = dict(default="2"),
vmmem = dict(default="1"),
disks = dict(),
osver = dict(default="rhel_6x64"),
ifaces = dict(aliases=['nics', 'interfaces']),
timeout = dict(default=False),
mempol = dict(default="1"),
vm_ha = dict(default=True),
cpu_share = dict(default="0"),
boot_order = dict(default=["network", "hd"]),
del_prot = dict(default=True, type="bool"),
cd_drive = dict(default=False)
),
)
if not HAS_SDK:
module.fail_json(
msg='The `ovirtsdk` module is not importable. Check the requirements.'
)
rc = RHEV_SUCCESS
try:
rc, result = core(module)
except Exception as e:
module.fail_json(msg=str(e))
    if rc != 0:  # something went wrong, emit the msg
module.fail_json(rc=rc, msg=result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
|
CodeJuan/scrapy | refs/heads/master | scrapy/http/response/text.py | 21 | """
This module implements the TextResponse class, which adds encoding handling and
discovery (through HTTP headers) to the base Response class.
See documentation in docs/topics/request-response.rst
"""
from six.moves.urllib.parse import urljoin
from w3lib.encoding import html_to_unicode, resolve_encoding, \
html_body_declared_encoding, http_content_type_encoding
from scrapy.http.response import Response
from scrapy.utils.response import get_base_url
from scrapy.utils.python import memoizemethod_noargs
class TextResponse(Response):
_DEFAULT_ENCODING = 'ascii'
def __init__(self, *args, **kwargs):
self._encoding = kwargs.pop('encoding', None)
self._cached_benc = None
self._cached_ubody = None
self._cached_selector = None
super(TextResponse, self).__init__(*args, **kwargs)
def _set_url(self, url):
if isinstance(url, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode url - %s has no encoding' %
type(self).__name__)
self._url = url.encode(self.encoding)
else:
super(TextResponse, self)._set_url(url)
def _set_body(self, body):
self._body = ''
if isinstance(body, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode body - %s has no encoding' %
type(self).__name__)
self._body = body.encode(self._encoding)
else:
super(TextResponse, self)._set_body(body)
def replace(self, *args, **kwargs):
kwargs.setdefault('encoding', self.encoding)
return Response.replace(self, *args, **kwargs)
@property
def encoding(self):
return self._declared_encoding() or self._body_inferred_encoding()
def _declared_encoding(self):
return self._encoding or self._headers_encoding() \
or self._body_declared_encoding()
def body_as_unicode(self):
"""Return body as unicode"""
        # check for self.encoding before _cached_ubody just in case
        # _body_inferred_encoding is called
benc = self.encoding
if self._cached_ubody is None:
charset = 'charset=%s' % benc
self._cached_ubody = html_to_unicode(charset, self.body)[1]
return self._cached_ubody
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(get_base_url(self), url)
@memoizemethod_noargs
def _headers_encoding(self):
content_type = self.headers.get('Content-Type')
return http_content_type_encoding(content_type)
def _body_inferred_encoding(self):
if self._cached_benc is None:
content_type = self.headers.get('Content-Type')
benc, ubody = html_to_unicode(content_type, self.body, \
auto_detect_fun=self._auto_detect_fun, \
default_encoding=self._DEFAULT_ENCODING)
self._cached_benc = benc
self._cached_ubody = ubody
return self._cached_benc
def _auto_detect_fun(self, text):
for enc in (self._DEFAULT_ENCODING, 'utf-8', 'cp1252'):
try:
text.decode(enc)
except UnicodeError:
continue
return resolve_encoding(enc)
@memoizemethod_noargs
def _body_declared_encoding(self):
return html_body_declared_encoding(self.body)
@property
def selector(self):
from scrapy.selector import Selector
if self._cached_selector is None:
self._cached_selector = Selector(self)
return self._cached_selector
def xpath(self, query):
return self.selector.xpath(query)
def css(self, query):
return self.selector.css(query)
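# Usage sketch (added; not part of the upstream module): encoding resolution
# order is constructor argument, then Content-Type header, then a declaration
# in the body, then byte-level inference. The URL and values are illustrative.
if __name__ == '__main__':
    response = TextResponse(
        url='http://www.example.com',
        headers={'Content-Type': 'text/html; charset=cp1252'},
        body='Price: \xa3100')
    print(response.encoding)           # 'cp1252', taken from the header
    print(response.body_as_unicode())  # u'Price: \xa3100' decoded as cp1252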
|
cryptobanana/ansible | refs/heads/devel | lib/ansible/modules/system/user.py | 11 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: user
author:
- Stephen Fromm (@sfromm)
version_added: "0.2"
short_description: Manage user accounts
notes:
- There are specific requirements per platform on user management utilities. However
they generally come pre-installed with the system and Ansible will require they
are present at runtime. If they are not, a descriptive error message will be shown.
- For Windows targets, use the M(win_user) module instead.
description:
- Manage user accounts and user attributes.
- For Windows targets, use the M(win_user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
required: true
aliases: [ user ]
comment:
description:
- Optionally sets the description (aka I(GECOS)) of user account.
uid:
description:
- Optionally sets the I(UID) of the user.
non_unique:
description:
            - Optionally, when used with the -u option, this option allows changing
              the user ID to a non-unique value.
type: bool
default: "no"
version_added: "1.1"
seuser:
description:
- Optionally sets the seuser type (user_u) on selinux enabled systems.
version_added: "2.1"
group:
description:
- Optionally sets the user's primary group (takes a group name).
groups:
description:
- Puts the user in list of groups. When set to the empty string ('groups='),
the user is removed from all groups except the primary group.
- Before version 2.3, the only input format allowed was a 'comma separated string',
now it should be able to accept YAML lists also.
append:
description:
- If C(yes), will only add groups, not set them to just the list
in I(groups).
type: bool
default: "no"
shell:
description:
- Optionally set the user's shell.
- On Mac OS X, before version 2.5, the default shell for non-system users was
/usr/bin/false. Since 2.5, the default shell for non-system users on
Mac OS X is /bin/bash.
home:
description:
- Optionally set the user's home directory.
skeleton:
description:
- Optionally set a home skeleton directory. Requires create_home option!
version_added: "2.0"
password:
description:
- Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks
like in a playbook. See U(http://docs.ansible.com/ansible/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
              Note that on Darwin systems, this value has to be cleartext.
Beware of security issues.
state:
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
choices: [ absent, present ]
default: present
create_home:
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not
exist.
- Changed from C(createhome) to C(create_home) in version 2.5.
type: bool
default: 'yes'
aliases: ['createhome']
move_home:
description:
- If set to C(yes) when used with C(home=), attempt to move the
user's home directory to the specified directory if it isn't there
already.
type: bool
default: "no"
system:
description:
- When creating an account, setting this to C(yes) makes the user a
system account. This setting cannot be changed on existing users.
type: bool
default: "no"
force:
description:
- When used with C(state=absent), behavior is as with C(userdel --force).
type: bool
default: "no"
login_class:
description:
- Optionally sets the user's login class for FreeBSD, OpenBSD and NetBSD systems.
remove:
description:
- When used with C(state=absent), behavior is as with C(userdel --remove).
type: bool
default: "no"
generate_ssh_key:
description:
- Whether to generate a SSH key for the user in question.
This will B(not) overwrite an existing SSH key.
type: bool
default: "no"
version_added: "0.9"
ssh_key_bits:
description:
- Optionally specify number of bits in SSH key to create.
default: default set by ssh-keygen
version_added: "0.9"
ssh_key_type:
description:
- Optionally specify the type of SSH key to generate.
Available SSH key types will depend on implementation
present on target host.
default: rsa
version_added: "0.9"
ssh_key_file:
description:
- Optionally specify the SSH key filename. If this is a relative
filename then it will be relative to the user's home directory.
default: .ssh/id_rsa
version_added: "0.9"
ssh_key_comment:
description:
- Optionally define the comment for the SSH key.
default: ansible-generated on $HOSTNAME
version_added: "0.9"
ssh_key_passphrase:
description:
- Set a passphrase for the SSH key. If no
passphrase is provided, the SSH key will default to
having no passphrase.
version_added: "0.9"
update_password:
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
choices: [ always, on_create ]
default: always
version_added: "1.3"
expires:
description:
- An expiry time for the user in epoch, it will be ignored on platforms that do not support this.
Currently supported on Linux and FreeBSD.
version_added: "1.9"
local:
description:
- Forces the use of "local" command alternatives on platforms that implement it.
              This is useful in environments that use centralized authentication when you want to manipulate the local users.
              I.e. it uses `luseradd` instead of `useradd`.
- This requires that these commands exist on the targeted host, otherwise it will be a fatal error.
type: bool
default: 'no'
version_added: "2.4"
'''
EXAMPLES = '''
- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
user:
name: johnd
comment: John Doe
uid: 1040
group: admin
- name: Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
user:
name: james
shell: /bin/bash
groups: admins,developers
append: yes
- name: Remove the user 'johnd'
user:
name: johnd
state: absent
remove: yes
- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
user:
name: jsmith
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_file: .ssh/id_rsa
- name: Added a consultant whose account you want to expire
user:
name: james18
shell: /bin/zsh
groups: developers
expires: 1422403387
'''
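# Added note (illustrative, not part of the module): the `password` option
# expects an already-crypted value on most platforms. One way to produce a
# SHA-512 hash with the Python standard library on a Linux control machine,
# assuming crypt(3) there supports the $6$ method:
#
# import crypt
# crypt.crypt('Sup3rS3cret', '$6$mysecretsalt')
# -> '$6$mysecretsalt$...'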
import grp
import os
import platform
import pwd
import shutil
import socket
import time
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import load_platform_subclass, AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
try:
import spwd
HAVE_SPWD = True
except:
HAVE_SPWD = False
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
SHADOWFILE = '/etc/shadow'
DATE_FORMAT = '%Y-%m-%d'
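    # An illustrative subclass skeleton (an assumption, mirroring the platform
    # classes defined later in this file). load_platform_subclass() selects the
    # subclass whose 'platform' matches the target host:
    #
    #     class ExampleBSDUser(User):
    #         platform = 'ExampleBSD'
    #         distribution = None
    #         def create_user(self):
    #             cmd = [self.module.get_bin_path('useradd', True), self.name]
    #             return self.execute_command(cmd)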
def __new__(cls, *args, **kwargs):
return load_platform_subclass(User, args, kwargs)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
self.comment = module.params['comment']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.create_home = module.params['create_home']
self.move_home = module.params['move_home']
self.skeleton = module.params['skeleton']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.home = module.params['home']
self.expires = None
self.groups = None
self.local = module.params['local']
if module.params['groups'] is not None:
self.groups = ','.join(module.params['groups'])
if module.params['expires']:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception:
e = get_exception()
module.fail_json(msg="Invalid expires time %s: %s" % (self.expires, e))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
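        '''Run CMD and return (rc, stdout, stderr). When check mode is active
        and OBEY_CHECKMODE is true, the command is only logged and a
        successful empty result is returned instead.'''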
if self.module.check_mode and obey_checkmode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
else:
# cast all args to strings ansible-modules-core/issues/4397
cmd = [str(x) for x in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self):
if self.local:
command_name = 'luserdel'
else:
command_name = 'userdel'
cmd = [self.module.get_bin_path(command_name, True)]
if self.force:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self):
if self.local:
command_name = 'luseradd'
else:
command_name = 'useradd'
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.seuser is not None:
cmd.append('-Z')
cmd.append(self.seuser)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
elif self.group_exists(self.name):
# use the -N option (no user group) if a group already
# exists with the same name as the user to prevent
# errors from useradd trying to create a group when
# USERGROUPS_ENAB is set in /etc/login.defs.
if os.path.exists('/etc/redhat-release'):
dist = platform.dist()
major_release = int(dist[1].split('.')[0])
if major_release <= 5:
cmd.append('-n')
else:
cmd.append('-N')
elif os.path.exists('/etc/SuSE-release'):
# -N did not exist in useradd before SLE 11 and did not
# automatically create a group
dist = platform.dist()
major_release = int(dist[1].split('.')[0])
if major_release >= 12:
cmd.append('-N')
else:
cmd.append('-N')
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('-e')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
if not self.local:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def _check_usermod_append(self):
# check if this version of usermod can append groups
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
usermod_path = self.module.get_bin_path(command_name, True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path, '--help']
(rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
helpout = data1 + data2
# check if --append exists
lines = to_native(helpout).split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
cmd = [self.module.get_bin_path(command_name, True)]
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
# get a list of all groups for the user, including the primary
current_groups = self.user_group_membership(exclude_primary=False)
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('-e')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def group_exists(self, group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
if self.groups is None:
return None
info = self.user_info()
groups = set(x.strip() for x in self.groups.split(',') if x)
for g in groups.copy():
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
if info and remove_existing and self.group_info(g)[2] == info[3]:
groups.remove(g)
return groups
def user_group_membership(self, exclude_primary=True):
''' Return a list of groups the user belongs to '''
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem:
# Exclude the user's primary group by default
if not exclude_primary:
groups.append(group[0])
else:
if info[3] != group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()
return info
def user_password(self):
passwd = ''
if HAVE_SPWD:
try:
passwd = spwd.getspnam(self.name)[1]
except KeyError:
return passwd
if not self.user_exists():
return passwd
elif self.SHADOWFILE:
# Read shadow file for user's encrypted password string
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
passwd = line.split(':')[1]
return passwd
def get_ssh_key_path(self):
info = self.user_info()
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
def ssh_key_gen(self):
info = self.user_info()
if not os.path.exists(info[5]) and not self.module.check_mode:
return (1, '', 'User %s home directory does not exist' % self.name)
ssh_key_file = self.get_ssh_key_path()
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
if self.module.check_mode:
return (0, '', '')
try:
os.mkdir(ssh_dir, int('0700', 8))
os.chown(ssh_dir, info[2], info[3])
except OSError:
e = get_exception()
return (1, '', 'Failed to create %s: %s' % (ssh_dir, str(e)))
if os.path.exists(ssh_key_file):
return (None, 'Key already exists', '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-t')
cmd.append(self.ssh_type)
if self.ssh_bits > 0:
cmd.append('-b')
cmd.append(self.ssh_bits)
cmd.append('-C')
cmd.append(self.ssh_comment)
cmd.append('-f')
cmd.append(ssh_key_file)
cmd.append('-N')
if self.ssh_passphrase is not None:
cmd.append(self.ssh_passphrase)
else:
cmd.append('')
(rc, out, err) = self.execute_command(cmd)
if rc == 0 and not self.module.check_mode:
# If the keys were successfully created, we should be able
# to tweak ownership.
os.chown(ssh_key_file, info[2], info[3])
os.chown('%s.pub' % ssh_key_file, info[2], info[3])
return (rc, out, err)
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd, obey_checkmode=False)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
f = open(ssh_public_key_file)
ssh_public_key = f.read().strip()
f.close()
except IOError:
return None
return ssh_public_key
def create_user(self):
# by default we use the create_user_useradd method
return self.create_user_useradd()
def remove_user(self):
# by default we use the remove_user_userdel method
return self.remove_user_userdel()
def modify_user(self):
# by default we use the modify_user_usermod method
return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
if self.skeleton is not None:
skeleton = self.skeleton
else:
skeleton = '/etc/skel'
if os.path.exists(skeleton):
try:
shutil.copytree(skeleton, path, symlinks=True)
except OSError:
e = get_exception()
self.module.exit_json(failed=True, msg="%s" % e)
else:
try:
os.makedirs(path)
except OSError:
e = get_exception()
self.module.exit_json(failed=True, msg="%s" % e)
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
except OSError:
e = get_exception()
self.module.exit_json(failed=True, msg="%s" % e)
# ===========================================
class FreeBsdUser(User):
"""
This is a FreeBSD User manipulation class - it uses the pw command
to manipulate the user database, followed by the chpass command
to change the password.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'FreeBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def remove_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'userdel',
'-n',
self.name
]
if self.remove:
cmd.append('-r')
return self.execute_command(cmd)
def create_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'useradd',
'-n',
self.name,
]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.expires:
days = (time.mktime(self.expires) - time.time()) // 86400
cmd.append('-e')
cmd.append(str(int(days)))
        # system cannot be handled currently - should we error if it's requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.password is not None:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
def modify_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires:
days = (time.mktime(self.expires) - time.time()) // 86400
cmd.append('-e')
cmd.append(str(int(days)))
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password in a second command
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
return self.execute_command(cmd)
return (rc, out, err)
class OpenBSDUser(User):
"""
    This is an OpenBSD User manipulation class.
Main differences are that OpenBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'OpenBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None and self.password != '*':
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups_option = '-S'
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_option = '-G'
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append(groups_option)
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
(rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
for line in out.splitlines():
tokens = line.split()
if tokens[0] == 'class' and len(tokens) == 2:
user_login_class = tokens[1]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None \
and self.password != '*' and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class NetBSDUser(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
def get_password_defaults(self):
# Read password aging defaults
try:
minweeks = ''
maxweeks = ''
warnweeks = ''
for line in open("/etc/default/passwd", 'r'):
line = line.strip()
if (line.startswith('#') or line == ''):
continue
key, value = line.split('=')
if key == "MINWEEKS":
minweeks = value.rstrip('\n')
elif key == "MAXWEEKS":
maxweeks = value.rstrip('\n')
elif key == "WARNWEEKS":
warnweeks = value.rstrip('\n')
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % str(err))
return (minweeks, maxweeks, warnweeks)
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
if not self.module.check_mode:
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
fields[3] = str(int(minweeks) * 7)
if maxweeks:
fields[4] = str(int(maxweeks) * 7)
if warnweeks:
fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
(rc, out, err) = (0, '', '')
if not self.module.check_mode:
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
for line in open(self.SHADOWFILE, 'rb').readlines():
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
fields[3] = str(int(minweeks) * 7)
if maxweeks:
fields[4] = str(int(maxweeks) * 7)
if warnweeks:
fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
rc = 0
except Exception:
err = get_exception()
self.module.fail_json(msg="failed to update users password: %s" % str(err))
return (rc, out, err)
class DarwinUser(User):
"""
This is a Darwin Mac OS X User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
    - System users must be under 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
]
def _get_dscl(self):
return [self.module.get_bin_path('dscl', True), self.dscl_directory]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += ['-search', '/Groups', 'GroupMembership', self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
        '''Return user PROPERTY as given by dscl(1) read, or None if not found.'''
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, property]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
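        # For example, 'dscl . -read /Users/alice RealName' may print either:
        #   RealName: Alice
        # or, when the value contains embedded spaces:
        #   RealName:
        #    Alice B. Example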
lines = out.splitlines()
# sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
else:
if len(lines) > 2:
return '\n'.join([lines[1].strip()] + lines[2:])
else:
if len(lines) == 2:
return lines[1].strip()
else:
return None
def _get_next_uid(self, system=None):
'''
Return the next available uid. If system=True, then
        uid should be below 500, if possible.
'''
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
max_system_uid = 0
for line in out.splitlines():
current_uid = int(line.split(' ')[-1])
if max_uid < current_uid:
max_uid = current_uid
if max_system_uid < current_uid and current_uid < 500:
max_system_uid = current_uid
if system and (0 < max_system_uid < 499):
return max_system_uid + 1
return max_uid + 1
def _change_user_password(self):
'''Change password for SELF.NAME against SELF.PASSWORD.
Please note that password must be cleartext.
'''
# some documentation on how is stored passwords on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += ['-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
        '''Convert SELF.GROUP to its numerical value, as the string dscl expects.'''
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
'''Add or remove SELF.NAME to or from GROUP depending on ACTION.
ACTION can be 'add' or 'remove' otherwise 'remove' is assumed. '''
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group), err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
        '''Synchronize the user's group membership with SELF.GROUPS,
        honoring SELF.APPEND. Returns (rc, out, err, changed).'''
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = set(self.groups.split(','))
else:
target = set([])
if self.append is False:
for remove in current - target:
                (_rc, _out, _err) = self.__modify_group(remove, 'remove')
                rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
            (_rc, _out, _err) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
        return (rc, out, err, changed)
def _update_system_user(self):
        '''Hide or show the user on the login window according to SELF.SYSTEM.
        Returns 0 if a change has been made, None otherwise.'''
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if self.name not in hidden_users:
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
                    self.module.fail_json(msg='Cannot add user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del (hidden_users[hidden_users.index(self.name)])
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
        '''Check if SELF.NAME is a known user on the system.'''
cmd = self._get_dscl()
cmd += ['-list', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
return rc == 0
def remove_user(self):
'''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
info = self.user_info()
cmd = self._get_dscl()
cmd += ['-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name]
        (rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid(self.system))
# Homedir is not created by default
if self.create_home:
if self.home is None:
self.home = '/Users/%s' % self.name
if not self.module.check_mode:
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
# dscl sets shell to /usr/bin/false when UserShell is not specified
# so set the shell to /bin/bash when the user is not a system user
if not self.system and self.shell is None:
self.shell = '/bin/bash'
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
                (rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
                    return (rc, _out, _err)
        (rc, _out, _err) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
        return (rc, out, err)
def modify_user(self):
changed = None
out = ''
err = ''
if self.group:
self._make_group_numerical()
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
                    (rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always' and self.password is not None:
            (rc, _out, _err) = self._change_user_password()
out += _out
err += _err
changed = rc
if self.groups:
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
class AIX(User):
"""
    This is an AIX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'AIX'
distribution = None
SHADOWFILE = '/etc/security/passwd'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.password is not None:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
else:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
if rc is not None:
return (rc, out + out2, err + err2)
else:
return (rc2, out + out2, err + err2)
class HPUX(User):
"""
    This is an HP-UX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'HP-UX'
distribution = None
SHADOWFILE = '/etc/shadow'
def create_user(self):
cmd = ['/usr/sam/lbin/useradd.sam']
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user(self):
cmd = ['/usr/sam/lbin/userdel.sam']
if self.force:
cmd.append('-F')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = ['/usr/sam/lbin/usermod.sam']
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='str'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
expires=dict(type='float'),
local=dict(type='bool'),
),
supports_check_mode=True
)
user = User(module)
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
if __name__ == '__main__':
main()
|
JunhwanPark/TizenRT | refs/heads/artik | external/iotivity/iotivity_1.3-rel/build_common/iotivityconfig/__init__.py | 1 | # ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# This file contains compiler tests for use in scons 'Configure'
# tests.
from compiler import factory
def _check_for_broken_gcc_headers(context, flag):
# Check for issue in some older (pre-C++11) C library headers that
# causes functions like snprintf() to remain undeclared when
# -std=c++0x or -ansi, for example, is added to the g++ command
# line flags, and despite the fact the appropriate feature test
# macro to make the prototypes visible is defined.
#
    # Reports the result via context.Result() and prints a warning if the
    # broken headers were detected.
#
# This should only be called if the compiler is g++ (which it
# should be if we are here) and a flag was automatically appended
# to CXXFLAGS.
context.Message('Checking for broken GCC C headers when C++11 is enabled... ')
ret = '-std=gnu++' in flag
context.Result(ret)
if ret:
print('''
Warning: detected pre-C++11 GCC C header bugs.
See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=34032 for details.
''')
def _inform_user_of_broken_gcc_headers(context, flag):
    # Informational check used to warn the user about broken GCC headers.
    # It is unnecessary for actual builds.
    if flag not in (0, 1):
# The flag is neither 1 nor 0, meaning it contains the
# automatically detected C++11 flag.
# Now verify that the compiler is actually GCC.
is_gcc = factory.check_for_gcc_cxx(context)
if is_gcc:
# This should only be called if the compiler is g++ and a
# flag was automatically appended to CXXFLAGS.
#
# We do not care if the user added a flag that triggers
# the header bug. It's the user's responsibility to
# handle the issue in that case.
_check_for_broken_gcc_headers(context, flag)
def check_c99_flags(context):
"""
Check if command line flag is required to enable C99 support.
Returns 1 if no flag is required, 0 if no flag was found, or the
actual flag if one was found.
"""
cc = context.env['CC']
context.Message('Checking for C99 flag for ' + cc + '... ')
config = factory.make_c_compiler_config(context)
ret = config.check_c99_flags()
context.Result(ret)
return ret
def check_cxx11_flags(context):
"""
Check if command line flag is required to enable C++11 support.
Returns 1 if no flag is required, 0 if no flag was found, or the
actual flag if one was found.
"""
cxx = context.env['CXX']
context.Message('Checking for C++11 flag for ' + cxx + '... ')
config = factory.make_cxx_compiler_config(context)
ret = config.check_cxx11_flags()
context.Result(ret)
# Let the user know if a workaround was enabled for broken GCC C
# headers when C++11 is enabled.
_inform_user_of_broken_gcc_headers(context, ret)
return ret
def check_pthreads(context):
"""
Check if pthreads are supported for this platform.
Sets POSIX_SUPPORTED based on the result.
"""
context.Message('Checking for POSIX Thread Support...')
config = factory.make_c_compiler_config(context)
ret = config.has_pthreads_support()
context.env['POSIX_SUPPORTED'] = ret
context.Result(ret)
return ret
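# Illustrative usage (an assumption, not part of this file): these functions
# are meant to be registered as SCons custom configure tests, e.g.
#
#     conf = Configure(env, custom_tests={'CheckC99Flags': check_c99_flags,
#                                         'CheckCXX11Flags': check_cxx11_flags,
#                                         'CheckPthreads': check_pthreads})
#     if not conf.CheckC99Flags():
#         print('C99 compiler support is required')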
|
Mortal/aiotkinter | refs/heads/master | aiotkinter/loop.py | 1 | import asyncio
import tkinter
class _TkinterSelector(asyncio.selectors._BaseSelectorImpl):
def __init__(self):
super().__init__()
self._tk = tkinter.Tk(useTk=0)
self._ready = []
def register(self, fileobj, events, data=None):
key = super().register(fileobj, events, data)
mask = 0
if events & asyncio.selectors.EVENT_READ:
mask |= tkinter.READABLE
if events & asyncio.selectors.EVENT_WRITE:
mask |= tkinter.WRITABLE
def ready(fd, mask):
assert key.fd == fd
events = 0
if mask & tkinter.READABLE:
events |= asyncio.selectors.EVENT_READ
if mask & tkinter.WRITABLE:
events |= asyncio.selectors.EVENT_WRITE
self._ready.append((key, events))
self._tk.createfilehandler(key.fd, mask, ready)
return key
def unregister(self, fileobj):
key = super().unregister(fileobj)
self._tk.deletefilehandler(key.fd)
return key
def select(self, timeout=None):
self._ready = []
if timeout is not None:
timeout = int(timeout*1000)
token = self._tk.createtimerhandler(timeout, lambda: True)
self._tk.dooneevent()
if timeout is not None:
token.deletetimerhandler()
return self._ready
class TkinterEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
def new_event_loop(self):
try:
return self._loop_factory(selector=_TkinterSelector())
except TypeError:
raise Exception('The default event loop is not a selector event loop')
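# Illustrative usage (an assumption, not part of the original module): install
# the policy so that new event loops dispatch I/O through Tk's event machinery.
#
#     import asyncio
#     from aiotkinter.loop import TkinterEventLoopPolicy
#
#     asyncio.set_event_loop_policy(TkinterEventLoopPolicy())
#     loop = asyncio.new_event_loop()
#     loop.run_until_complete(asyncio.sleep(1.0))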
|
mdhunter/twitter-to-wordcloud | refs/heads/master | twitter_to_corpus.py | 1 | #!/usr/bin/env python3
# Converts one or more Twitter archives into a combined corpus. This script
# performs no filtering.
#
# Copyright 2016 Mathew Hunter
import argparse
import json
import nltk
import re
import sys
import zipfile
# Processes the referenced archives to produce a corpus
def generate_corpus(archive_filenames):
# Process each archive, extracting words from it
corpus_words = []
tokenizer = nltk.tokenize.TweetTokenizer(preserve_case=False, strip_handles=True)
for archive_filename in archive_filenames:
# Pull the words from the archive
try:
archive_words = __extract_words_from_twitter_archive(archive_filename, tokenizer)
corpus_words.extend(archive_words)
except Exception as e:
print("There was an error extracting words from the archive '" + archive_filename + "': " + str(e))
raise
return corpus_words
# Pulls words from a Twitter archive
def __extract_words_from_twitter_archive(archive_filename, tokenizer):
# Open the archive and extract words from the content files within
corpus_words = []
with zipfile.ZipFile(archive_filename) as archive:
# Pull the Tweet content file names
content_files = []
try:
content_files = [name for name in archive.namelist() if re.match("data/js/tweets/.*", name)]
except Exception as e:
print("There was an error reading the archive: " + str(e))
raise
# Check if there are content files to process
        if len(content_files) < 1:
            print("No data to process")
            sys.exit(1)
# Process each file, dumping the words from each content file into the list
for content_file in content_files:
with archive.open(content_file) as file:
# Pull the raw data for the file
raw_string_data = str(file.read(), "utf-8")
string_data = raw_string_data[raw_string_data.index("["):]
# Pull the words
content_words = __extract_words_from_twitter_content(string_data, tokenizer)
corpus_words.extend(content_words)
return corpus_words
# Pulls words from Twitter content represented by JSON
def __extract_words_from_twitter_content(string_data, tokenizer):
# Load the JSON data as a collection of Tweet objects
tweets = []
try:
tweets = json.loads(string_data)
except Exception as e:
print("There was an error decoding the JSON content: " + str(e))
raise
# Process each Tweet, pulling words from the text content
content_words = []
for tweet in tweets:
# Tokenize the Tweet content and add found tokens
tokens = tokenizer.tokenize(tweet["text"])
content_words.extend(tokens)
return content_words
if __name__ == "__main__":
# Create an argument parser
parser = argparse.ArgumentParser(description="Unwraps one or more Twitter archives to produce a body of text")
parser.add_argument("source_file", nargs="+", help="the source file(s) to process")
# Parse the arguments and pull pertinent args
args = parser.parse_args()
archive_filenames = args.source_file
# Produce a corpus and output it
corpus_words = generate_corpus(archive_filenames)
print(" ".join(corpus_words))
|
LeoXu92/AppleSampleSpider | refs/heads/master | AppleSampleSpider.py | 1 | #coding: utf-8
import requests
import json
import sys
import datetime
import os
from contextlib import closing
def all_sample_code():
'''
    library.json comes from https://developer.apple.com/library/content/navigation/library.json
"columns": { "name" : 0,
"id" : 1,
"type" : 2,
"date" : 3,
"updateSize" : 4,
"topic" : 5,
"framework" : 6,
"release" : 7,
"subtopic" : 8,
"url" : 9,
"sortOrder" : 10,
"displayDate": 11,
"platform" : 12,
},
但是columns中platform后面多了一个逗号,不符合json,需要删掉。
'''
    with open('library.json', 'r') as f:
        return json.loads(f.read(), strict=False)
def get_download_url(item):
name = item[9].split('/')[2]
book_url = 'https://developer.apple.com/library/content/samplecode/%s/book.json' % name
r = requests.get(url=book_url)
print book_url
    download_url = 'https://developer.apple.com/library/content/samplecode/%s/%s' % (name, r.json()['sampleCode'])
return download_url.encode("utf-8")
def download_file(url, path):
if not os.path.exists(path):
os.makedirs(path)
start = datetime.datetime.now().replace(microsecond=0)
filename = url.split('/')[-1]
filepath = os.path.join(path,filename)
with closing(requests.get(url, stream=True)) as response:
        chunk_size = 1024  # maximum bytes per read
content_size = int(response.headers['content-length'])
with open(filepath, "wb") as file:
for data in response.iter_content(chunk_size=chunk_size):
file.write(data)
end = datetime.datetime.now().replace(microsecond=0)
    print '%s downloaded, took %s' % (filename, end - start)
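# Usage note (a sketch; the URL is hypothetical): streaming in 1024-byte
# chunks keeps memory usage flat even for large archives, e.g.
#
#   download_file('https://developer.apple.com/library/content/samplecode/Example/Example.zip', 'files')
#
# writes files/Example.zip.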
if __name__ == '__main__':
codes = all_sample_code()
for x in codes['documents']:
if x[2] == 5:
download_url = get_download_url(x)
print 'download url:', download_url
download_file(download_url, 'files') |
Sorsly/subtle | refs/heads/master | google-cloud-sdk/lib/third_party/pygments/formatters/html.py | 27 | # -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
__all__ = ['HtmlFormatter']
_escape_html_table = {
ord('&'): u'&',
ord('<'): u'<',
ord('>'): u'>',
ord('"'): u'"',
ord("'"): u''',
}
def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
def get_random_id():
"""Return a random id for javascript fields."""
from random import random
from time import time
try:
from hashlib import sha1 as sha
except ImportError:
import sha
sha = sha.new
return sha('%s|%s' % (random(), time())).hexdigest()
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
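# Example (a sketch): for a standard token type such as Token.Keyword this
# returns its short class 'k'; for a non-standard subtype, say
# Token.Keyword.SomeCustom, the loop walks up to the nearest standard
# ancestor and appends the missing path, yielding 'k-SomeCustom'.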
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
*New in Pygments 0.9:* If you select the ``'table'`` line numbers, the
wrapping table will have a CSS class of this string plus ``'table'``,
the default is accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``). *New in
Pygments 0.11.*
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file. *New in Pygments 0.6.*
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
*New in Pygments 1.1.*
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 0.11.*
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
        `get_style_defs` method given]) (default: ``False``). *New in
Pygments 0.6.*
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks. *New in Pygments
0.7.*
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines. *New in Pygments 0.9.*
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
**Subclassing the HTML formatter**
*New in Pygments 0.7.*
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
    If the `nowrap` option is set, the generator is simply iterated over and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.anchorlinenos = options.get('anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: #%s; ' % ndef['color']
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: #%s; ' % ndef['bgcolor']
if ndef['border']:
style += 'border: 1px solid #%s; ' % ndef['border']
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, basestring):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in self.class2style.iteritems()
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print >>sys.stderr, 'Note: Cannot determine output file name, ' \
'using current directory as base for the CSS file name'
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
cf = open(cssfilename, "w")
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
cf.close()
except IOError, err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title = self.title,
cssfile = self.cssfile,
encoding = self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title = self.title,
styledefs = self.get_style_defs('body'),
encoding = self.encoding))
for t, line in inner:
yield t, line
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO.StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
la = self.lineanchors
aln = self.anchorlinenos
nocls = self.noclasses
if sp:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if i % sp == 0:
if aln:
lines.append('<a href="#%s-%d" class="special">%*d</a>' %
(la, i, mw, i))
else:
lines.append('<span class="special">%*d</span>' % (mw, i))
else:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
else:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
if nocls:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td><div class="linenodiv" '
'style="background-color: #f0f0f0; padding-right: 10px">'
'<pre style="line-height: 125%">' +
ls + '</pre></div></td><td class="code">')
else:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">')
yield 0, dummyoutfile.getvalue()
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(lines) + num - 1))
if self.noclasses:
if sp:
for t, line in lines:
if num%sp == 0:
style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
else:
style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
yield 1, '<span style="%s">%*s</span> ' % (
style, mw, (num%st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, ('<span style="background-color: #f0f0f0; '
'padding: 0 5px 0 5px">%*s</span> ' % (
mw, (num%st and ' ' or num)) + line)
num += 1
elif sp:
for t, line in lines:
yield 1, '<span class="lineno%s">%*s</span> ' % (
num%sp == 0 and ' special' or '', mw,
(num%st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, '<span class="lineno">%*s</span> ' % (
mw, (num%st and ' ' or num)) + line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
i = self.linenostart - 1 # subtract 1 since we have to increment i *before* yielding
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
+ (style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield tup
yield 0, '</pre>'
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
lspan = ''
line = ''
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_class(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line += (lspan and '</span>') + cspan + part + \
(cspan and '</span>') + lsep
else: # both are the same
line += part + (lspan and '</span>') + lsep
yield 1, line
line = ''
elif part:
yield 1, cspan + part + (cspan and '</span>') + lsep
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line += (lspan and '</span>') + cspan + parts[-1]
lspan = cspan
else:
line += parts[-1]
elif parts[-1]:
line = cspan + parts[-1]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
yield 1, line + (lspan and '</span>') + lsep
def _highlight_lines(self, tokensource):
"""
        Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
|
vileopratama/vitech | refs/heads/master | src/addons/mrp/report/bom_structure.py | 15 | ## -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import osv
from openerp.report import report_sxw
class bom_structure(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(bom_structure, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'get_children': self.get_children,
})
def get_children(self, object, level=0):
result = []
def _get_rec(object, level, qty=1.0):
for l in object:
res = {}
res['pname'] = l.product_id.name_get()[0][1]
res['pcode'] = l.product_id.default_code
res['pqty'] = l.product_qty * qty
res['uname'] = l.product_uom.name
res['level'] = level
res['code'] = l.bom_id.code
result.append(res)
if l.child_line_ids:
                    if level < 6:
                        level += 1
                        _get_rec(l.child_line_ids, level, qty=res['pqty'])
                    if level > 0 and level < 6:
                        level -= 1
            return result
        children = _get_rec(object, level)
return children
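    # Each row returned by get_children() is a flat dict (a sketch with
    # hypothetical values): {'pname': 'Wheel', 'pcode': 'W-01', 'pqty': 4.0,
    # 'uname': 'Unit(s)', 'level': 1, 'code': 'BOM/001'}; quantities are
    # multiplied down the tree and recursion depth is capped at 6 levels.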
class report_mrpbomstructure(osv.AbstractModel):
_name = 'report.mrp.report_mrpbomstructure'
_inherit = 'report.abstract_report'
_template = 'mrp.report_mrpbomstructure'
_wrapped_report_class = bom_structure
|
paulrouget/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pywebsocket/mod_pywebsocket/http_header_util.py | 23 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for parsing and formatting headers that follow the grammar defined
in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
"""
from six.moves import urllib
_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
def _is_char(c):
"""Returns true iff c is in CHAR as specified in HTTP RFC."""
return ord(c) <= 127
def _is_ctl(c):
"""Returns true iff c is in CTL as specified in HTTP RFC."""
return ord(c) <= 31 or ord(c) == 127
class ParsingState(object):
def __init__(self, data):
self.data = data
self.head = 0
def peek(state, pos=0):
"""Peeks the character at pos from the head of data."""
if state.head + pos >= len(state.data):
return None
return state.data[state.head + pos]
def consume(state, amount=1):
"""Consumes specified amount of bytes from the head and returns the
consumed bytes. If there's not enough bytes to consume, returns None.
"""
if state.head + amount > len(state.data):
return None
result = state.data[state.head:state.head + amount]
state.head = state.head + amount
return result
def consume_string(state, expected):
"""Given a parsing state and a expected string, consumes the string from
the head. Returns True if consumed successfully. Otherwise, returns
False.
"""
pos = 0
for c in expected:
if c != peek(state, pos):
return False
pos += 1
consume(state, pos)
return True
def consume_lws(state):
"""Consumes a LWS from the head. Returns True if any LWS is consumed.
Otherwise, returns False.
LWS = [CRLF] 1*( SP | HT )
"""
original_head = state.head
consume_string(state, '\r\n')
pos = 0
while True:
c = peek(state, pos)
if c == ' ' or c == '\t':
pos += 1
else:
if pos == 0:
state.head = original_head
return False
else:
consume(state, pos)
return True
def consume_lwses(state):
"""Consumes *LWS from the head."""
while consume_lws(state):
pass
def consume_token(state):
"""Consumes a token from the head. Returns the token or None if no token
was found.
"""
pos = 0
while True:
c = peek(state, pos)
if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
if pos == 0:
return None
return consume(state, pos)
else:
pos += 1
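# Example (a sketch): given state = ParsingState('permessage-deflate; x=1'),
# consume_token(state) returns 'permessage-deflate' and leaves state.head
# pointing at the ';' separator.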
def consume_token_or_quoted_string(state):
"""Consumes a token or a quoted-string, and returns the token or unquoted
string. If no token or quoted-string was found, returns None.
"""
original_head = state.head
if not consume_string(state, '"'):
return consume_token(state)
result = []
expect_quoted_pair = False
while True:
if not expect_quoted_pair and consume_lws(state):
result.append(' ')
continue
c = consume(state)
if c is None:
# quoted-string is not enclosed with double quotation
state.head = original_head
return None
elif expect_quoted_pair:
expect_quoted_pair = False
if _is_char(c):
result.append(c)
else:
# Non CHAR character found in quoted-pair
state.head = original_head
return None
elif c == '\\':
expect_quoted_pair = True
elif c == '"':
return ''.join(result)
elif _is_ctl(c):
# Invalid character %r found in qdtext
state.head = original_head
return None
else:
result.append(c)
def quote_if_necessary(s):
"""Quotes arbitrary string into quoted-string."""
quote = False
if s == '':
return '""'
result = []
for c in s:
if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
quote = True
if c == '"' or _is_ctl(c):
result.append('\\' + c)
else:
result.append(c)
if quote:
return '"' + ''.join(result) + '"'
else:
return ''.join(result)
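# Example (a sketch): quote_if_necessary('token') returns 'token' unchanged,
# while quote_if_necessary('a b') returns '"a b"' because the space is a
# separator, and an embedded '"' is escaped as a quoted-pair.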
def parse_uri(uri):
"""Parse absolute URI then return host, port and resource."""
parsed = urllib.parse.urlsplit(uri)
if parsed.scheme != 'wss' and parsed.scheme != 'ws':
# |uri| must be a relative URI.
# TODO(toyoshim): Should validate |uri|.
return None, None, uri
if parsed.hostname is None:
return None, None, None
port = None
try:
port = parsed.port
except ValueError as e:
        # The port property raises ValueError on an invalid null port
        # description like 'ws://host:/path'.
return None, None, None
if port is None:
if parsed.scheme == 'ws':
port = 80
else:
port = 443
path = parsed.path
if not path:
path += '/'
if parsed.query:
path += '?' + parsed.query
if parsed.fragment:
path += '#' + parsed.fragment
return parsed.hostname, port, path
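# Example (a sketch): parse_uri('ws://example.com/chat?room=1') returns
# ('example.com', 80, '/chat?room=1'); 'wss://example.com/' defaults to port
# 443; a relative URI such as '/chat' falls through as (None, None, '/chat').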
try:
urllib.parse.uses_netloc.index('ws')
except ValueError as e:
# urlparse in Python2.5.1 doesn't have 'ws' and 'wss' entries.
urllib.parse.uses_netloc.append('ws')
urllib.parse.uses_netloc.append('wss')
# vi:sts=4 sw=4 et
|
GdZ/scriptfile | refs/heads/master | software/googleAppEngine/lib/django_1_3/tests/modeltests/model_inheritance/tests.py | 51 | from operator import attrgetter
from django.core.exceptions import FieldError
from django.test import TestCase
from models import (Chef, CommonInfo, ItalianRestaurant, ParkingLot, Place,
Post, Restaurant, Student, StudentWorker, Supplier, Worker, MixinModel)
class ModelInheritanceTests(TestCase):
def test_abstract(self):
# The Student and Worker models both have 'name' and 'age' fields on
# them and inherit the __unicode__() method, just as with normal Python
# subclassing. This is useful if you want to factor out common
# information for programming purposes, but still completely
# independent separate models at the database level.
w1 = Worker.objects.create(name="Fred", age=35, job="Quarry worker")
w2 = Worker.objects.create(name="Barney", age=34, job="Quarry worker")
s = Student.objects.create(name="Pebbles", age=5, school_class="1B")
self.assertEqual(unicode(w1), "Worker Fred")
self.assertEqual(unicode(s), "Student Pebbles")
# The children inherit the Meta class of their parents (if they don't
# specify their own).
self.assertQuerysetEqual(
Worker.objects.values("name"), [
{"name": "Barney"},
{"name": "Fred"},
],
lambda o: o
)
# Since Student does not subclass CommonInfo's Meta, it has the effect
# of completely overriding it. So ordering by name doesn't take place
# for Students.
self.assertEqual(Student._meta.ordering, [])
# However, the CommonInfo class cannot be used as a normal model (it
# doesn't exist as a model).
self.assertRaises(AttributeError, lambda: CommonInfo.objects.all())
# A StudentWorker which does not exist is both a Student and Worker
# which does not exist.
self.assertRaises(Student.DoesNotExist,
StudentWorker.objects.get, pk=12321321
)
self.assertRaises(Worker.DoesNotExist,
StudentWorker.objects.get, pk=12321321
)
# MultipleObjectsReturned is also inherited.
# This is written out "long form", rather than using __init__/create()
# because of a bug with diamond inheritance (#10808)
sw1 = StudentWorker()
sw1.name = "Wilma"
sw1.age = 35
sw1.save()
sw2 = StudentWorker()
sw2.name = "Betty"
sw2.age = 24
sw2.save()
self.assertRaises(Student.MultipleObjectsReturned,
StudentWorker.objects.get, pk__lt=sw2.pk + 100
)
self.assertRaises(Worker.MultipleObjectsReturned,
StudentWorker.objects.get, pk__lt=sw2.pk + 100
)
def test_multiple_table(self):
post = Post.objects.create(title="Lorem Ipsum")
# The Post model has distinct accessors for the Comment and Link models.
post.attached_comment_set.create(content="Save $ on V1agr@", is_spam=True)
post.attached_link_set.create(
content="The Web framework for perfections with deadlines.",
url="http://www.djangoproject.com/"
)
# The Post model doesn't have an attribute called
# 'attached_%(class)s_set'.
self.assertRaises(AttributeError,
getattr, post, "attached_%(class)s_set"
)
# The Place/Restaurant/ItalianRestaurant models all exist as
# independent models. However, the subclasses also have transparent
# access to the fields of their ancestors.
# Create a couple of Places.
p1 = Place.objects.create(name="Master Shakes", address="666 W. Jersey")
p2 = Place.objects.create(name="Ace Harware", address="1013 N. Ashland")
# Test constructor for Restaurant.
r = Restaurant.objects.create(
name="Demon Dogs",
address="944 W. Fullerton",
serves_hot_dogs=True,
serves_pizza=False,
rating=2
)
# Test the constructor for ItalianRestaurant.
c = Chef.objects.create(name="Albert")
ir = ItalianRestaurant.objects.create(
name="Ristorante Miron",
address="1234 W. Ash",
serves_hot_dogs=False,
serves_pizza=False,
serves_gnocchi=True,
rating=4,
chef=c
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Ash"), [
"Ristorante Miron",
],
attrgetter("name")
)
ir.address = "1234 W. Elm"
ir.save()
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
"Ristorante Miron",
],
attrgetter("name")
)
# Make sure Restaurant and ItalianRestaurant have the right fields in
# the right order.
self.assertEqual(
[f.name for f in Restaurant._meta.fields],
["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef"]
)
self.assertEqual(
[f.name for f in ItalianRestaurant._meta.fields],
["id", "name", "address", "place_ptr", "rating", "serves_hot_dogs", "serves_pizza", "chef", "restaurant_ptr", "serves_gnocchi"],
)
self.assertEqual(Restaurant._meta.ordering, ["-rating"])
        # Even though a Place 'p' (a parent of a Supplier) has a p.supplier
        # attribute, a Restaurant object cannot access that reverse relation,
        # since it's not part of the Place-Supplier hierarchy.
self.assertQuerysetEqual(Place.objects.filter(supplier__name="foo"), [])
self.assertRaises(FieldError,
Restaurant.objects.filter, supplier__name="foo"
)
# Parent fields can be used directly in filters on the child model.
self.assertQuerysetEqual(
Restaurant.objects.filter(name="Demon Dogs"), [
"Demon Dogs",
],
attrgetter("name")
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(address="1234 W. Elm"), [
"Ristorante Miron",
],
attrgetter("name")
)
# Filters against the parent model return objects of the parent's type.
p = Place.objects.get(name="Demon Dogs")
self.assertIs(type(p), Place)
# Since the parent and child are linked by an automatically created
# OneToOneField, you can get from the parent to the child by using the
# child's name.
self.assertEqual(
p.restaurant, Restaurant.objects.get(name="Demon Dogs")
)
self.assertEqual(
Place.objects.get(name="Ristorante Miron").restaurant.italianrestaurant,
ItalianRestaurant.objects.get(name="Ristorante Miron")
)
self.assertEqual(
Restaurant.objects.get(name="Ristorante Miron").italianrestaurant,
ItalianRestaurant.objects.get(name="Ristorante Miron")
)
# This won't work because the Demon Dogs restaurant is not an Italian
# restaurant.
self.assertRaises(ItalianRestaurant.DoesNotExist,
lambda: p.restaurant.italianrestaurant
)
# An ItalianRestaurant which does not exist is also a Place which does
# not exist.
self.assertRaises(Place.DoesNotExist,
ItalianRestaurant.objects.get, name="The Noodle Void"
)
# MultipleObjectsReturned is also inherited.
self.assertRaises(Place.MultipleObjectsReturned,
Restaurant.objects.get, id__lt=12321
)
# Related objects work just as they normally do.
s1 = Supplier.objects.create(name="Joe's Chickens", address="123 Sesame St")
s1.customers = [r, ir]
s2 = Supplier.objects.create(name="Luigi's Pasta", address="456 Sesame St")
s2.customers = [ir]
# This won't work because the Place we select is not a Restaurant (it's
# a Supplier).
p = Place.objects.get(name="Joe's Chickens")
self.assertRaises(Restaurant.DoesNotExist,
lambda: p.restaurant
)
self.assertEqual(p.supplier, s1)
self.assertQuerysetEqual(
ir.provider.order_by("-name"), [
"Luigi's Pasta",
"Joe's Chickens"
],
attrgetter("name")
)
self.assertQuerysetEqual(
Restaurant.objects.filter(provider__name__contains="Chickens"), [
"Ristorante Miron",
"Demon Dogs",
],
attrgetter("name")
)
self.assertQuerysetEqual(
ItalianRestaurant.objects.filter(provider__name__contains="Chickens"), [
"Ristorante Miron",
],
attrgetter("name"),
)
park1 = ParkingLot.objects.create(
name="Main St", address="111 Main St", main_site=s1
)
park2 = ParkingLot.objects.create(
name="Well Lit", address="124 Sesame St", main_site=ir
)
self.assertEqual(
Restaurant.objects.get(lot__name="Well Lit").name,
"Ristorante Miron"
)
# The update() command can update fields in parent and child classes at
        # once (although it executes multiple SQL queries to do so).
rows = Restaurant.objects.filter(
serves_hot_dogs=True, name__contains="D"
).update(
name="Demon Puppies", serves_hot_dogs=False
)
self.assertEqual(rows, 1)
r1 = Restaurant.objects.get(pk=r.pk)
self.assertFalse(r1.serves_hot_dogs)
self.assertEqual(r1.name, "Demon Puppies")
# The values() command also works on fields from parent models.
self.assertQuerysetEqual(
ItalianRestaurant.objects.values("name", "rating"), [
{"rating": 4, "name": "Ristorante Miron"}
],
lambda o: o
)
# select_related works with fields from the parent object as if they
# were a normal part of the model.
self.assertNumQueries(2,
lambda: ItalianRestaurant.objects.all()[0].chef
)
self.assertNumQueries(1,
lambda: ItalianRestaurant.objects.select_related("chef")[0].chef
)
def test_mixin_init(self):
m = MixinModel()
self.assertEqual(m.other_attr, 1)
|
ntymtsiv/tempest | refs/heads/master | tempest/api/object_storage/test_object_expiry.py | 1 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest.api.object_storage import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class ObjectExpiryTest(base.BaseObjectTest):
@classmethod
def setUpClass(cls):
super(ObjectExpiryTest, cls).setUpClass()
cls.container_name = data_utils.rand_name(name='TestContainer')
cls.container_client.create_container(cls.container_name)
def setUp(self):
super(ObjectExpiryTest, self).setUp()
# create object
self.object_name = data_utils.rand_name(name='TestObject')
resp, _ = self.object_client.create_object(self.container_name,
self.object_name, '')
@classmethod
def tearDownClass(cls):
cls.delete_containers([cls.container_name])
super(ObjectExpiryTest, cls).tearDownClass()
def _test_object_expiry(self, metadata):
# update object metadata
resp, _ = \
self.object_client.update_object_metadata(self.container_name,
self.object_name,
metadata,
metadata_prefix='')
# verify object metadata
resp, _ = \
self.object_client.list_object_metadata(self.container_name,
self.object_name)
self.assertEqual(resp['status'], '200')
self.assertHeaders(resp, 'Object', 'HEAD')
self.assertIn('x-delete-at', resp)
resp, body = self.object_client.get_object(self.container_name,
self.object_name)
self.assertEqual(resp['status'], '200')
self.assertHeaders(resp, 'Object', 'GET')
self.assertIn('x-delete-at', resp)
        # sleep for 5 seconds, past the 3-second expiry, so the object expires
time.sleep(5)
# object should not be there anymore
self.assertRaises(exceptions.NotFound, self.object_client.get_object,
self.container_name, self.object_name)
@attr(type='gate')
def test_get_object_after_expiry_time(self):
metadata = {'X-Delete-After': '3'}
self._test_object_expiry(metadata)
@attr(type='gate')
def test_get_object_at_expiry_time(self):
metadata = {'X-Delete-At': str(int(time.time()) + 3)}
self._test_object_expiry(metadata)
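# Note on the two expiry headers exercised above (a sketch): Swift stores
# both as an absolute x-delete-at timestamp -- 'X-Delete-After: 3' is
# translated to now + 3 seconds, while 'X-Delete-At' is taken as-is.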
|
ISEAGE-ISU/cdc2-2015-www | refs/heads/master | cdc/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
40223150/2015cd_midterm | refs/heads/master | static/Brython3.1.0-20150301-090019/Lib/xml/dom/xmlbuilder.py | 873 | """Implementation of the DOM Level 3 'LS-Load' feature."""
import copy
import xml.dom
from xml.dom.NodeFilter import NodeFilter
__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
class Options:
"""Features object that has variables set for each DOMBuilder feature.
The DOMBuilder class uses an instance of this class to pass settings to
the ExpatBuilder class.
"""
# Note that the DOMBuilder class in LoadSave constrains which of these
# values can be set using the DOM Level 3 LoadSave feature.
namespaces = 1
namespace_declarations = True
validation = False
external_parameter_entities = True
external_general_entities = True
external_dtd_subset = True
validate_if_schema = False
validate = False
datatype_normalization = False
create_entity_ref_nodes = True
entities = True
whitespace_in_element_content = True
cdata_sections = True
comments = True
charset_overrides_xml_encoding = True
infoset = False
supported_mediatypes_only = False
errorHandler = None
filter = None
class DOMBuilder:
entityResolver = None
errorHandler = None
filter = None
ACTION_REPLACE = 1
ACTION_APPEND_AS_CHILDREN = 2
ACTION_INSERT_AFTER = 3
ACTION_INSERT_BEFORE = 4
_legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)
def __init__(self):
self._options = Options()
def _get_entityResolver(self):
return self.entityResolver
def _set_entityResolver(self, entityResolver):
self.entityResolver = entityResolver
def _get_errorHandler(self):
return self.errorHandler
def _set_errorHandler(self, errorHandler):
self.errorHandler = errorHandler
def _get_filter(self):
return self.filter
def _set_filter(self, filter):
self.filter = filter
def setFeature(self, name, state):
if self.supportsFeature(name):
state = state and 1 or 0
try:
settings = self._settings[(_name_xform(name), state)]
except KeyError:
raise xml.dom.NotSupportedErr(
"unsupported feature: %r" % (name,))
else:
for name, value in settings:
setattr(self._options, name, value)
else:
raise xml.dom.NotFoundErr("unknown feature: " + repr(name))
def supportsFeature(self, name):
return hasattr(self._options, _name_xform(name))
def canSetFeature(self, name, state):
key = (_name_xform(name), state and 1 or 0)
return key in self._settings
# This dictionary maps from (feature,value) to a list of
# (option,value) pairs that should be set on the Options object.
# If a (feature,value) setting is not in this dictionary, it is
# not supported by the DOMBuilder.
#
_settings = {
("namespace_declarations", 0): [
("namespace_declarations", 0)],
("namespace_declarations", 1): [
("namespace_declarations", 1)],
("validation", 0): [
("validation", 0)],
("external_general_entities", 0): [
("external_general_entities", 0)],
("external_general_entities", 1): [
("external_general_entities", 1)],
("external_parameter_entities", 0): [
("external_parameter_entities", 0)],
("external_parameter_entities", 1): [
("external_parameter_entities", 1)],
("validate_if_schema", 0): [
("validate_if_schema", 0)],
("create_entity_ref_nodes", 0): [
("create_entity_ref_nodes", 0)],
("create_entity_ref_nodes", 1): [
("create_entity_ref_nodes", 1)],
("entities", 0): [
("create_entity_ref_nodes", 0),
("entities", 0)],
("entities", 1): [
("entities", 1)],
("whitespace_in_element_content", 0): [
("whitespace_in_element_content", 0)],
("whitespace_in_element_content", 1): [
("whitespace_in_element_content", 1)],
("cdata_sections", 0): [
("cdata_sections", 0)],
("cdata_sections", 1): [
("cdata_sections", 1)],
("comments", 0): [
("comments", 0)],
("comments", 1): [
("comments", 1)],
("charset_overrides_xml_encoding", 0): [
("charset_overrides_xml_encoding", 0)],
("charset_overrides_xml_encoding", 1): [
("charset_overrides_xml_encoding", 1)],
("infoset", 0): [],
("infoset", 1): [
("namespace_declarations", 0),
("validate_if_schema", 0),
("create_entity_ref_nodes", 0),
("entities", 0),
("cdata_sections", 0),
("datatype_normalization", 1),
("whitespace_in_element_content", 1),
("comments", 1),
("charset_overrides_xml_encoding", 1)],
("supported_mediatypes_only", 0): [
("supported_mediatypes_only", 0)],
("namespaces", 0): [
("namespaces", 0)],
("namespaces", 1): [
("namespaces", 1)],
}
def getFeature(self, name):
xname = _name_xform(name)
try:
return getattr(self._options, xname)
except AttributeError:
if name == "infoset":
options = self._options
return (options.datatype_normalization
and options.whitespace_in_element_content
and options.comments
and options.charset_overrides_xml_encoding
and not (options.namespace_declarations
or options.validate_if_schema
or options.create_entity_ref_nodes
or options.entities
or options.cdata_sections))
raise xml.dom.NotFoundErr("feature %s not known" % repr(name))
def parseURI(self, uri):
if self.entityResolver:
input = self.entityResolver.resolveEntity(None, uri)
else:
input = DOMEntityResolver().resolveEntity(None, uri)
return self.parse(input)
def parse(self, input):
options = copy.copy(self._options)
options.filter = self.filter
options.errorHandler = self.errorHandler
fp = input.byteStream
if fp is None and options.systemId:
import urllib.request
fp = urllib.request.urlopen(input.systemId)
return self._parse_bytestream(fp, options)
def parseWithContext(self, input, cnode, action):
if action not in self._legal_actions:
raise ValueError("not a legal action")
raise NotImplementedError("Haven't written this yet...")
def _parse_bytestream(self, stream, options):
import xml.dom.expatbuilder
builder = xml.dom.expatbuilder.makeBuilder(options)
return builder.parseFile(stream)
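# Minimal usage sketch (the URI is hypothetical and is fetched over the
# network by the default DOMEntityResolver):
#
#   builder = DOMBuilder()
#   builder.setFeature('namespaces', 1)
#   doc = builder.parseURI('http://example.com/doc.xml')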
def _name_xform(name):
return name.lower().replace('-', '_')
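# Example (a sketch): _name_xform('namespace-declarations') returns
# 'namespace_declarations', matching the attribute names on Options.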
class DOMEntityResolver(object):
__slots__ = '_opener',
def resolveEntity(self, publicId, systemId):
assert systemId is not None
source = DOMInputSource()
source.publicId = publicId
source.systemId = systemId
source.byteStream = self._get_opener().open(systemId)
# determine the encoding if the transport provided it
source.encoding = self._guess_media_encoding(source)
        # determine the base URI if we can
import posixpath, urllib.parse
parts = urllib.parse.urlparse(systemId)
scheme, netloc, path, params, query, fragment = parts
# XXX should we check the scheme here as well?
if path and not path.endswith("/"):
path = posixpath.dirname(path) + "/"
parts = scheme, netloc, path, params, query, fragment
source.baseURI = urllib.parse.urlunparse(parts)
return source
def _get_opener(self):
try:
return self._opener
except AttributeError:
self._opener = self._create_opener()
return self._opener
def _create_opener(self):
import urllib.request
return urllib.request.build_opener()
def _guess_media_encoding(self, source):
info = source.byteStream.info()
if "Content-Type" in info:
for param in info.getplist():
if param.startswith("charset="):
return param.split("=", 1)[1].lower()
class DOMInputSource(object):
__slots__ = ('byteStream', 'characterStream', 'stringData',
'encoding', 'publicId', 'systemId', 'baseURI')
def __init__(self):
self.byteStream = None
self.characterStream = None
self.stringData = None
self.encoding = None
self.publicId = None
self.systemId = None
self.baseURI = None
def _get_byteStream(self):
return self.byteStream
def _set_byteStream(self, byteStream):
self.byteStream = byteStream
def _get_characterStream(self):
return self.characterStream
def _set_characterStream(self, characterStream):
self.characterStream = characterStream
def _get_stringData(self):
return self.stringData
def _set_stringData(self, data):
self.stringData = data
def _get_encoding(self):
return self.encoding
def _set_encoding(self, encoding):
self.encoding = encoding
def _get_publicId(self):
return self.publicId
def _set_publicId(self, publicId):
self.publicId = publicId
def _get_systemId(self):
return self.systemId
def _set_systemId(self, systemId):
self.systemId = systemId
def _get_baseURI(self):
return self.baseURI
def _set_baseURI(self, uri):
self.baseURI = uri
class DOMBuilderFilter:
"""Element filter which can be used to tailor construction of
a DOM instance.
"""
# There's really no need for this class; concrete implementations
# should just implement the endElement() and startElement()
# methods as appropriate. Using this makes it easy to only
# implement one of them.
FILTER_ACCEPT = 1
FILTER_REJECT = 2
FILTER_SKIP = 3
FILTER_INTERRUPT = 4
whatToShow = NodeFilter.SHOW_ALL
def _get_whatToShow(self):
return self.whatToShow
def acceptNode(self, element):
return self.FILTER_ACCEPT
def startContainer(self, element):
return self.FILTER_ACCEPT
del NodeFilter
class DocumentLS:
"""Mixin to create documents that conform to the load/save spec."""
async = False
def _get_async(self):
return False
def _set_async(self, async):
if async:
raise xml.dom.NotSupportedErr(
"asynchronous document loading is not supported")
def abort(self):
# What does it mean to "clear" a document? Does the
# documentElement disappear?
raise NotImplementedError(
"haven't figured out what this means yet")
def load(self, uri):
raise NotImplementedError("haven't written this yet")
def loadXML(self, source):
raise NotImplementedError("haven't written this yet")
def saveXML(self, snode):
if snode is None:
snode = self
elif snode.ownerDocument is not self:
raise xml.dom.WrongDocumentErr()
return snode.toxml()
class DOMImplementationLS:
MODE_SYNCHRONOUS = 1
MODE_ASYNCHRONOUS = 2
def createDOMBuilder(self, mode, schemaType):
if schemaType is not None:
raise xml.dom.NotSupportedErr(
"schemaType not yet supported")
if mode == self.MODE_SYNCHRONOUS:
return DOMBuilder()
if mode == self.MODE_ASYNCHRONOUS:
raise xml.dom.NotSupportedErr(
"asynchronous builders are not supported")
raise ValueError("unknown value for mode")
def createDOMWriter(self):
raise NotImplementedError(
"the writer interface hasn't been written yet!")
def createDOMInputSource(self):
return DOMInputSource()
|
MrMC/mrmc | refs/heads/master | tools/EventClients/examples/python/example_mouse.py | 262 | #!/usr/bin/python
# This is a simple example showing how you can send mouse movement
# events to XBMC.
# NOTE: Read the comments in 'example_button1.py' for a more detailed
# explanation.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
import time
import sys
host = "localhost"
port = 9777
addr = (host, port)
    sock = socket(AF_INET, SOCK_DGRAM)
# First packet must be HELO and can contain an icon
packet = PacketHELO("Example Mouse", ICON_PNG,
"../../icons/mouse.png")
packet.send(sock, addr)
# wait for notification window to close (in XBMC)
time.sleep(2)
# send mouse events to take cursor from top left to bottom right of the screen
# here 0 to 65535 will map to XBMC's screen width and height.
# Specifying absolute mouse coordinates is unsupported currently.
for i in range(0, 65535, 2):
packet = PacketMOUSE(i,i)
packet.send(sock, addr)
# ok we're done, close the connection
packet = PacketBYE()
packet.send(sock, addr)
if __name__=="__main__":
main()
|
Galexrt/zulip | refs/heads/master | zerver/migrations/0049_userprofile_pm_content_in_desktop_notifications.py | 3 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0048_enter_sends_default_to_false'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='pm_content_in_desktop_notifications',
field=models.BooleanField(default=True),
),
]
|
MarcosCommunity/odoo | refs/heads/marcos-8.0 | addons/l10n_in_hr_payroll/report/payment_advice_report.py | 340 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class payment_advice_report(osv.osv):
_name = "payment.advice.report"
_description = "Payment Advice Analysis"
_auto = False
_columns = {
'name':fields.char('Name', readonly=True),
'date': fields.date('Date', readonly=True,),
'year': fields.char('Year', size=4, readonly=True),
'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
('10', 'October'), ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'state':fields.selection([
('draft', 'Draft'),
('confirm', 'Confirmed'),
('cancel', 'Cancelled'),
], 'Status', select=True, readonly=True),
'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),
'nbr': fields.integer('# Payment Lines', readonly=True),
'number':fields.char('Number', readonly=True),
'bysal': fields.float('By Salary', readonly=True),
'bank_id':fields.many2one('res.bank', 'Bank', readonly=True),
'company_id':fields.many2one('res.company', 'Company', readonly=True),
'cheque_nos':fields.char('Cheque Numbers', readonly=True),
'neft': fields.boolean('NEFT Transaction', readonly=True),
'ifsc_code': fields.char('IFSC Code', size=32, readonly=True),
'employee_bank_no': fields.char('Employee Bank Account', required=True),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'payment_advice_report')
cr.execute("""
create or replace view payment_advice_report as (
select
min(l.id) as id,
sum(l.bysal) as bysal,
p.name,
p.state,
p.date,
p.number,
p.company_id,
p.bank_id,
p.chaque_nos as cheque_nos,
p.neft,
l.employee_id,
l.ifsc_code,
l.name as employee_bank_no,
to_char(p.date, 'YYYY') as year,
to_char(p.date, 'MM') as month,
to_char(p.date, 'YYYY-MM-DD') as day,
1 as nbr
from
hr_payroll_advice as p
left join hr_payroll_advice_line as l on (p.id=l.advice_id)
where
l.employee_id IS NOT NULL
group by
p.number,p.name,p.date,p.state,p.company_id,p.bank_id,p.chaque_nos,p.neft,
l.employee_id,l.advice_id,l.bysal,l.ifsc_code, l.name
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
EricMuller/mywebmarks-backend | refs/heads/master | requirements/twisted/Twisted-17.1.0/src/twisted/internet/reactor.py | 63 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The reactor is the Twisted event loop within Twisted, the loop which drives
applications using Twisted. The reactor provides APIs for networking,
threading, dispatching events, and more.
The default reactor depends on the platform and will be installed if this
module is imported without another reactor being explicitly installed
beforehand. Regardless of which reactor is installed, importing this module is
the correct way to get a reference to it.
New application code should prefer to pass and accept the reactor as a
parameter where it is needed, rather than relying on being able to import this
module to get a reference. This simplifies unit testing and may make it easier
to one day support multiple reactors (as a performance enhancement), though
this is not currently possible.
@see: L{IReactorCore<twisted.internet.interfaces.IReactorCore>}
@see: L{IReactorTime<twisted.internet.interfaces.IReactorTime>}
@see: L{IReactorProcess<twisted.internet.interfaces.IReactorProcess>}
@see: L{IReactorTCP<twisted.internet.interfaces.IReactorTCP>}
@see: L{IReactorSSL<twisted.internet.interfaces.IReactorSSL>}
@see: L{IReactorUDP<twisted.internet.interfaces.IReactorUDP>}
@see: L{IReactorMulticast<twisted.internet.interfaces.IReactorMulticast>}
@see: L{IReactorUNIX<twisted.internet.interfaces.IReactorUNIX>}
@see: L{IReactorUNIXDatagram<twisted.internet.interfaces.IReactorUNIXDatagram>}
@see: L{IReactorFDSet<twisted.internet.interfaces.IReactorFDSet>}
@see: L{IReactorThreads<twisted.internet.interfaces.IReactorThreads>}
@see: L{IReactorPluggableResolver<twisted.internet.interfaces.IReactorPluggableResolver>}
"""
from __future__ import division, absolute_import
import sys
del sys.modules['twisted.internet.reactor']
from twisted.internet import default
default.install()
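# A minimal sketch (not part of the original module) of the docstring's
# advice: accept the reactor as a parameter instead of importing this module,
# so unit tests can substitute twisted.internet.task.Clock. The helper name
# below is invented for illustration.
def _reactorAsParameterSketch(reactor, delay, f):
    from twisted.internet.task import deferLater
    # Any IReactorTime provider works here: the installed reactor in
    # production, or a task.Clock instance in tests.
    return deferLater(reactor, delay, f)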
|
zhujzhuo/openstack-trove | refs/heads/master | trove/tests/api/mgmt/instances_actions.py | 4 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mox3 import mox
import trove.common.instance as tr_instance
from trove.backup import models as backup_models
from trove.backup import state
from trove.common.context import TroveContext
from trove.instance.tasks import InstanceTasks
from trove.instance import models as imodels
from trove.instance.models import DBInstance
from trove.extensions.mgmt.instances.models import MgmtInstance
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
from novaclient.v2.servers import Server
from proboscis import test
from proboscis import before_class
from proboscis import after_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_raises
from trove.common import exception
from trove.extensions.mgmt.instances.service import MgmtInstanceController
GROUP = "dbaas.api.mgmt.action.reset-task-status"
class MgmtInstanceBase(object):
def setUp(self):
self.mock = mox.Mox()
self._create_instance()
self.controller = MgmtInstanceController()
def tearDown(self):
self.db_info.delete()
def _create_instance(self):
self.context = TroveContext(is_admin=True)
self.tenant_id = 999
self.db_info = DBInstance.create(
id="inst-id-1",
name="instance",
flavor_id=1,
datastore_version_id=test_config.dbaas_datastore_version_id,
tenant_id=self.tenant_id,
volume_size=None,
task_status=InstanceTasks.NONE)
self.server = self.mock.CreateMock(Server)
self.instance = imodels.Instance(
self.context,
self.db_info,
self.server,
datastore_status=imodels.InstanceServiceStatus(
tr_instance.ServiceStatuses.RUNNING))
def _make_request(self, path='/', context=None, **kwargs):
from webob import Request
path = '/'
print("path: %s" % path)
return Request.blank(path=path, environ={'trove.context': context},
**kwargs)
def _reload_db_info(self):
self.db_info = DBInstance.find_by(id=self.db_info.id, deleted=False)
@test(groups=[GROUP])
class RestartTaskStatusTests(MgmtInstanceBase):
@before_class
def setUp(self):
super(RestartTaskStatusTests, self).setUp()
self.backups_to_clear = []
@after_class
def tearDown(self):
super(RestartTaskStatusTests, self).tearDown()
def _change_task_status_to(self, new_task_status):
self.db_info.task_status = new_task_status
self.db_info.save()
def _make_request(self, path='/', context=None, **kwargs):
req = super(RestartTaskStatusTests, self)._make_request(path, context,
**kwargs)
req.method = 'POST'
body = {'reset-task-status': {}}
return req, body
def reset_task_status(self):
self.mock.StubOutWithMock(MgmtInstance, 'load')
MgmtInstance.load(context=self.context,
id=self.db_info.id).AndReturn(self.instance)
self.mock.ReplayAll()
req, body = self._make_request(context=self.context)
self.controller = MgmtInstanceController()
resp = self.controller.action(req, body, self.tenant_id,
self.db_info.id)
self.mock.UnsetStubs()
self.mock.VerifyAll()
return resp
@test
def mgmt_restart_task_requires_admin_account(self):
context = TroveContext(is_admin=False)
req, body = self._make_request(context=context)
self.controller = MgmtInstanceController()
assert_raises(exception.Forbidden, self.controller.action,
req, body, self.tenant_id, self.db_info.id)
@test
def mgmt_restart_task_returns_json(self):
resp = self.reset_task_status()
out = resp.data("application/json")
assert_equal(out, None)
@test
def mgmt_restart_task_changes_status_to_none(self):
self._change_task_status_to(InstanceTasks.BUILDING)
self.reset_task_status()
self._reload_db_info()
assert_equal(self.db_info.task_status, InstanceTasks.NONE)
@test
def mgmt_reset_task_status_clears_backups(self):
self.reset_task_status()
self._reload_db_info()
assert_equal(self.db_info.task_status, InstanceTasks.NONE)
user = test_config.users.find_user(Requirements(is_admin=False))
dbaas = create_dbaas_client(user)
admin = test_config.users.find_user(Requirements(is_admin=True))
admin_dbaas = create_dbaas_client(admin)
result = dbaas.instances.backups(self.db_info.id)
assert_equal(0, len(result))
# Create some backups.
backup_models.DBBackup.create(
name="forever_new",
description="forever new",
tenant_id=self.tenant_id,
state=state.BackupState.NEW,
instance_id=self.db_info.id,
deleted=False)
backup_models.DBBackup.create(
name="forever_build",
description="forever build",
tenant_id=self.tenant_id,
state=state.BackupState.BUILDING,
instance_id=self.db_info.id,
deleted=False)
backup_models.DBBackup.create(
name="forever_completed",
description="forever completed",
tenant_id=self.tenant_id,
state=state.BackupState.COMPLETED,
instance_id=self.db_info.id,
deleted=False)
# List the backups for this instance.
# There ought to be three in the admin tenant, but
# none in a different user's tenant.
result = dbaas.instances.backups(self.db_info.id)
assert_equal(0, len(result))
result = admin_dbaas.instances.backups(self.db_info.id)
assert_equal(3, len(result))
self.backups_to_clear = result
# Reset the task status.
self.reset_task_status()
self._reload_db_info()
result = admin_dbaas.instances.backups(self.db_info.id)
assert_equal(3, len(result))
for backup in result:
if backup.name == 'forever_completed':
assert_equal(backup.status,
state.BackupState.COMPLETED)
else:
assert_equal(backup.status, state.BackupState.FAILED)
@test(runs_after=[mgmt_reset_task_status_clears_backups])
def clear_test_backups(self):
for backup in self.backups_to_clear:
found_backup = backup_models.DBBackup.find_by(id=backup.id)
found_backup.delete()
admin = test_config.users.find_user(Requirements(is_admin=True))
admin_dbaas = create_dbaas_client(admin)
result = admin_dbaas.instances.backups(self.db_info.id)
assert_equal(0, len(result))
|
Nekmo/django-categories | refs/heads/master | categories/south_migrations/0006_auto__add_categoryrelation.py | 14 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CategoryRelation'
db.create_table('categories_categoryrelation', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('story', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['categories.Category'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('relation_type', self.gf('django.db.models.fields.CharField')(max_length='200', null=True, blank=True)),
))
db.send_create_signal('categories', ['CategoryRelation'])
def backwards(self, orm):
# Deleting model 'CategoryRelation'
db.delete_table('categories_categoryrelation')
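    # The 'models' dict below is South's frozen ORM snapshot: it records the
    # schema as of this migration so forwards()/backwards() can run against
    # historical model state. (Explanatory note; not in the generated file.)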
models = {
'categories.category': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Category'},
'alternate_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'alternate_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'meta_extra': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['categories.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'categories.categoryrelation': {
'Meta': {'object_name': 'CategoryRelation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'relation_type': ('django.db.models.fields.CharField', [], {'max_length': "'200'", 'null': 'True', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['categories']
|
nopoolcoin/p2pool-mom | refs/heads/master | SOAPpy/Types.py | 289 | from __future__ import nested_scopes
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Types.py 1496 2010-03-04 23:46:17Z pooryorick $'
from version import __version__
import UserList
import base64
import cgi
import urllib
import copy
import re
import time
from types import *
# SOAPpy modules
from Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name): return name[0]=='_'
def isPublic(name): return name[0]!='_'
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
_validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)
def __init__(self, data = None, name = None, typed = 1, attrs = None):
if self.__class__ == anyType:
raise Error, "anyType can't be instantiated directly"
if type(name) in (ListType, TupleType):
self._ns, self._name = name
else:
self._ns = self._validURIs[0]
self._name = name
self._typed = typed
self._attrs = {}
self._cache = None
self._type = self._typeName()
self._data = self._checkValueSpace(data)
if attrs != None:
self._setAttrs(attrs)
def __str__(self):
if hasattr(self,'_name') and self._name:
return "<%s %s at %d>" % (self.__class__, self._name, id(self))
return "<%s at %d>" % (self.__class__, id(self))
__repr__ = __str__
def _checkValueSpace(self, data):
return data
def _marshalData(self):
return str(self._data)
def _marshalAttrs(self, ns_map, builder):
a = ''
for attr, value in self._attrs.items():
ns, n = builder.genns(ns_map, attr[0])
a += n + ' %s%s="%s"' % \
(ns, attr[1], cgi.escape(str(value), 1))
return a
def _fixAttr(self, attr):
if type(attr) in (StringType, UnicodeType):
attr = (None, attr)
elif type(attr) == ListType:
attr = tuple(attr)
elif type(attr) != TupleType:
raise AttributeError, "invalid attribute type"
if len(attr) != 2:
raise AttributeError, "invalid attribute length"
if type(attr[0]) not in (NoneType, StringType, UnicodeType):
raise AttributeError, "invalid attribute namespace URI type"
return attr
def _getAttr(self, attr):
attr = self._fixAttr(attr)
try:
return self._attrs[attr]
except:
return None
def _setAttr(self, attr, value):
attr = self._fixAttr(attr)
if type(value) is StringType:
value = unicode(value)
self._attrs[attr] = value
def _setAttrs(self, attrs):
if type(attrs) in (ListType, TupleType):
for i in range(0, len(attrs), 2):
self._setAttr(attrs[i], attrs[i + 1])
return
if type(attrs) == DictType:
d = attrs
elif isinstance(attrs, anyType):
d = attrs._attrs
else:
raise AttributeError, "invalid attribute type"
for attr, value in d.items():
self._setAttr(attr, value)
def _setMustUnderstand(self, val):
self._setAttr((NS.ENV, "mustUnderstand"), val)
def _getMustUnderstand(self):
return self._getAttr((NS.ENV, "mustUnderstand"))
def _setActor(self, val):
self._setAttr((NS.ENV, "actor"), val)
def _getActor(self):
return self._getAttr((NS.ENV, "actor"))
def _typeName(self):
return self.__class__.__name__[:-4]
def _validNamespaceURI(self, URI, strict):
if not hasattr(self, '_typed') or not self._typed:
return None
if URI in self._validURIs:
return URI
if not strict:
return self._ns
raise AttributeError, \
"not a valid namespace for type %s" % self._type
class voidType(anyType):
pass
class stringType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type:" % self._type
return data
def _marshalData(self):
return self._data
class untypedType(stringType):
def __init__(self, data = None, name = None, attrs = None):
stringType.__init__(self, data, name, 0, attrs)
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
_validURIs = (NS.XSD2, NS.XSD3)
    __invalidre = '[\n\t]|^ | $|  '
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
if type(self.__invalidre) == StringType:
self.__invalidre = re.compile(self.__invalidre)
if self.__invalidre.search(data):
raise ValueError, "invalid %s value" % self._type
return data
class normalizedStringType(anyType):
_validURIs = (NS.XSD3,)
__invalidre = '[\n\r\t]'
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
if type(self.__invalidre) == StringType:
self.__invalidre = re.compile(self.__invalidre)
if self.__invalidre.search(data):
raise ValueError, "invalid %s value" % self._type
return data
class CDATAType(normalizedStringType):
_validURIs = (NS.XSD2,)
class booleanType(anyType):
def __int__(self):
return self._data
__nonzero__ = __int__
def _marshalData(self):
return ['false', 'true'][self._data]
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if data in (0, '0', 'false', ''):
return 0
if data in (1, '1', 'true'):
return 1
raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType):
raise Error, "invalid %s value" % self._type
return data
class floatType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType) or \
data < -3.4028234663852886E+38 or \
data > 3.4028234663852886E+38:
raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
return data
def _marshalData(self):
return "%.18g" % self._data # More precision
class doubleType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType, FloatType) or \
data < -1.7976931348623158E+308 or \
data > 1.7976931348623157E+308:
raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
return data
def _marshalData(self):
return "%.18g" % self._data # More precision
class durationType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
try:
# A tuple or a scalar is OK, but make them into a list
if type(data) == TupleType:
data = list(data)
elif type(data) != ListType:
data = [data]
if len(data) > 6:
raise Exception, "too many values"
# Now check the types of all the components, and find
# the first nonzero element along the way.
f = -1
for i in range(len(data)):
if data[i] == None:
data[i] = 0
continue
if type(data[i]) not in \
(IntType, LongType, FloatType):
raise Exception, "element %d a bad type" % i
if data[i] and f == -1:
f = i
# If they're all 0, just use zero seconds.
if f == -1:
self._cache = 'PT0S'
return (0,) * 6
# Make sure only the last nonzero element has a decimal fraction
# and only the first element is negative.
d = -1
for i in range(f, len(data)):
if data[i]:
if d != -1:
raise Exception, \
"all except the last nonzero element must be " \
"integers"
if data[i] < 0 and i > f:
raise Exception, \
"only the first nonzero element can be negative"
elif data[i] != long(data[i]):
d = i
# Pad the list on the left if necessary.
if len(data) < 6:
n = 6 - len(data)
f += n
d += n
data = [0] * n + data
# Save index of the first nonzero element and the decimal
# element for _marshalData.
self.__firstnonzero = f
self.__decimal = d
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
t = 0
if d[self.__firstnonzero] < 0:
s = '-P'
else:
s = 'P'
t = 0
for i in range(self.__firstnonzero, len(d)):
if d[i]:
if i > 2 and not t:
s += 'T'
t = 1
if self.__decimal == i:
s += "%g" % abs(d[i])
else:
s += "%d" % long(abs(d[i]))
s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
self._cache = s
return self._cache
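# Worked examples (added for illustration; not in the original source) of the
# marshalling above, mapping a (years, months, days, hours, minutes, seconds)
# tuple to an ISO 8601 duration:
#     durationType((0, 0, 3, 12, 0, 0))._marshalData()   -> 'P3DT12H'
#     durationType((0, 0, 0, 0, 0, 1.5))._marshalData()  -> 'PT1.5S'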
class timeDurationType(durationType):
_validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.time()
if (type(data) in (IntType, LongType)):
data = list(time.gmtime(data)[:6])
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[:6])
data[5] += f
elif type(data) in (ListType, TupleType):
if len(data) < 6:
raise Exception, "not enough values"
if len(data) > 9:
raise Exception, "too many values"
data = list(data[:6])
cleanDate(data)
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
f = d[5] - int(d[5])
if f != 0:
s += ("%g" % f)[1:]
s += 'Z'
self._cache = s
return self._cache
class recurringInstantType(anyType):
_validURIs = (NS.XSD,)
def _checkValueSpace(self, data):
try:
if data == None:
data = list(time.gmtime(time.time())[:6])
if (type(data) in (IntType, LongType)):
data = list(time.gmtime(data)[:6])
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[:6])
data[5] += f
elif type(data) in (ListType, TupleType):
if len(data) < 1:
raise Exception, "not enough values"
if len(data) > 9:
raise Exception, "too many values"
data = list(data[:6])
if len(data) < 6:
data += [0] * (6 - len(data))
f = len(data)
for i in range(f):
if data[i] == None:
if f < i:
raise Exception, \
"only leftmost elements can be none"
else:
f = i
break
cleanDate(data, f)
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
e = list(d)
neg = ''
if not e[0]:
e[0] = '--'
else:
if e[0] < 0:
neg = '-'
e[0] = abs(e[0])
if e[0] < 100:
e[0] = '-' + "%02d" % e[0]
else:
e[0] = "%04d" % e[0]
for i in range(1, len(e)):
if e[i] == None or (i < 3 and e[i] == 0):
e[i] = '-'
else:
if e[i] < 0:
neg = '-'
e[i] = abs(e[i])
e[i] = "%02d" % e[i]
if d[5]:
f = abs(d[5] - int(d[5]))
if f:
e[5] += ("%g" % f)[1:]
s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
self._cache = s
return self._cache
class timeInstantType(dateTimeType):
_validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
_validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[3:6]
elif (type(data) == FloatType):
f = data - int(data)
data = list(time.gmtime(int(data))[3:6])
data[2] += f
elif type(data) in (IntType, LongType):
data = time.gmtime(data)[3:6]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[3:6]
elif len(data) > 3:
raise Exception, "too many values"
data = [None, None, None] + list(data)
if len(data) < 6:
data += [0] * (6 - len(data))
cleanDate(data, 3)
data = data[3:]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
#s = ''
#
#s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
s = "%02d:%02d:%02d" % d
f = d[2] - int(d[2])
if f != 0:
s += ("%g" % f)[1:]
s += 'Z'
self._cache = s
return self._cache
class dateType(anyType):
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:3]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[0:3]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:3]
elif len(data) > 3:
raise Exception, "too many values"
data = list(data)
if len(data) < 3:
data += [1, 1, 1][len(data):]
data += [0, 0, 0]
cleanDate(data)
data = data[:3]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
self._cache = s
return self._cache
class gYearMonthType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:2]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[0:2]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:2]
elif len(data) > 2:
raise Exception, "too many values"
data = list(data)
if len(data) < 2:
data += [1, 1][len(data):]
data += [1, 0, 0, 0]
cleanDate(data)
data = data[:2]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
if d[0] < 0:
s = '-' + s
self._cache = s
return self._cache
class gYearType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:1]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:1]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%04dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class centuryType(anyType):
_validURIs = (NS.XSD2, NS.ENC)
def _checkValueSpace(self, data):
try:
if data == None:
                data = [time.gmtime(time.time())[0] / 100]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
                    data = [data[0] / 100]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%02dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class yearType(gYearType):
_validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[1:3]
elif type(data) in (IntType, LongType, FloatType):
data = time.gmtime(data)[1:3]
elif type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:2]
elif len(data) > 2:
raise Exception, "too many values"
data = list(data)
if len(data) < 2:
data += [1, 1][len(data):]
data = [0] + data + [0, 0, 0]
cleanDate(data, 1)
data = data[1:3]
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return tuple(data)
def _marshalData(self):
if self._cache == None:
self._cache = "--%02d-%02dZ" % self._data
return self._cache
class recurringDateType(gMonthDayType):
_validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[1:2]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[1:2]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
if data[0] < 1 or data[0] > 12:
raise Exception, "bad value"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
self._cache = "--%02d--Z" % self._data
return self._cache
class monthType(gMonthType):
_validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[2:3]
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[2:3]
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
if data[0] < 1 or data[0] > 31:
raise Exception, "bad value"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
self._cache = "---%02dZ" % self._data
return self._cache
class recurringDayType(gDayType):
_validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = encodeHexString(self._data)
return self._cache
class base64BinaryType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = base64.encodestring(self._data)
return self._cache
class base64Type(base64BinaryType):
_validURIs = (NS.ENC,)
class binaryType(anyType):
_validURIs = (NS.XSD, NS.ENC)
def __init__(self, data, name = None, typed = 1, encoding = 'base64',
attrs = None):
anyType.__init__(self, data, name, typed, attrs)
self._setAttr('encoding', encoding)
def _marshalData(self):
if self._cache == None:
if self._getAttr((None, 'encoding')) == 'base64':
self._cache = base64.encodestring(self._data)
else:
self._cache = encodeHexString(self._data)
return self._cache
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _setAttr(self, attr, value):
attr = self._fixAttr(attr)
if attr[1] == 'encoding':
if attr[0] != None or value not in ('base64', 'hex'):
raise AttributeError, "invalid encoding"
self._cache = None
anyType._setAttr(self, attr, value)
class anyURIType(anyType):
_validURIs = (NS.XSD3,)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (StringType, UnicodeType):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
if self._cache == None:
self._cache = urllib.quote(self._data)
return self._cache
class uriType(anyURIType):
_validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
_validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
def __init__(self, data, name = None, typed = 1, attrs = None):
if self.__class__ == NOTATIONType:
raise Error, "a NOTATION can't be instantiated directly"
anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) in (StringType, UnicodeType):
return (data,)
if type(data) not in (ListType, TupleType) or \
filter (lambda x: type(x) not in (StringType, UnicodeType), data):
raise AttributeError, "invalid %s type" % self._type
return data
def _marshalData(self):
return ' '.join(self._data)
class IDREFSType(ENTITIESType): pass
class NMTOKENSType(ENTITIESType): pass
class integerType(anyType):
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType):
raise ValueError, "invalid %s value" % self._type
return data
class nonPositiveIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data > 0:
raise ValueError, "invalid %s value" % self._type
return data
class non_Positive_IntegerType(nonPositiveIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'non-positive-integer'
class negativeIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data >= 0:
raise ValueError, "invalid %s value" % self._type
return data
class negative_IntegerType(negativeIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'negative-integer'
class longType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -9223372036854775808L or \
data > 9223372036854775807L:
raise ValueError, "invalid %s value" % self._type
return data
class intType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -2147483648L or \
data > 2147483647L:
raise ValueError, "invalid %s value" % self._type
return data
class shortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -32768 or \
data > 32767:
raise ValueError, "invalid %s value" % self._type
return data
class byteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < -128 or \
data > 127:
raise ValueError, "invalid %s value" % self._type
return data
class nonNegativeIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data < 0:
raise ValueError, "invalid %s value" % self._type
return data
class non_Negative_IntegerType(nonNegativeIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'non-negative-integer'
class unsignedLongType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 18446744073709551615L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedIntType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 4294967295L:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedShortType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 65535:
raise ValueError, "invalid %s value" % self._type
return data
class unsignedByteType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or \
data < 0 or \
data > 255:
raise ValueError, "invalid %s value" % self._type
return data
class positiveIntegerType(anyType):
_validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
def _checkValueSpace(self, data):
if data == None:
raise ValueError, "must supply initial %s value" % self._type
if type(data) not in (IntType, LongType) or data <= 0:
raise ValueError, "invalid %s value" % self._type
return data
class positive_IntegerType(positiveIntegerType):
_validURIs = (NS.XSD,)
def _typeName(self):
return 'positive-integer'
# Now compound types
class compoundType(anyType):
def __init__(self, data = None, name = None, typed = 1, attrs = None):
if self.__class__ == compoundType:
raise Error, "a compound can't be instantiated directly"
anyType.__init__(self, data, name, typed, attrs)
self._keyord = []
if type(data) == DictType:
self.__dict__.update(data)
def _aslist(self, item=None):
if item is not None:
return self.__dict__[self._keyord[item]]
else:
return map( lambda x: self.__dict__[x], self._keyord)
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.__dict__[item]
else:
retval = {}
def fun(x): retval[x.encode(encoding)] = self.__dict__[x]
if hasattr(self, '_keyord'):
map( fun, self._keyord)
else:
for name in dir(self):
if isPublic(name):
retval[name] = getattr(self,name)
return retval
def __getitem__(self, item):
if type(item) == IntType:
return self.__dict__[self._keyord[item]]
else:
return getattr(self, item)
def __len__(self):
return len(self._keyord)
def __nonzero__(self):
return 1
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs = None):
if name in self._keyord:
if type(self.__dict__[name]) != ListType:
self.__dict__[name] = [self.__dict__[name]]
self.__dict__[name].append(value)
else:
self.__dict__[name] = value
self._keyord.append(name)
def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
if subpos == 0 and type(self.__dict__[name]) != ListType:
self.__dict__[name] = value
else:
self.__dict__[name][subpos] = value
# only add to key order list if it does not already
# exist in list
if not (name in self._keyord):
            if pos < len(self._keyord):
self._keyord[pos] = name
else:
self._keyord.append(name)
def _getItemAsList(self, name, default = []):
try:
d = self.__dict__[name]
except:
return default
if type(d) == ListType:
return d
return [d]
def __str__(self):
return anyType.__str__(self) + ": " + str(self._asdict())
def __repr__(self):
return self.__str__()
class structType(compoundType):
pass
class headerType(structType):
_validURIs = (NS.ENV,)
def __init__(self, data = None, typed = 1, attrs = None):
structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
_validURIs = (NS.ENV,)
def __init__(self, data = None, typed = 1, attrs = None):
structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. It's possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
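            # Example (added for illustration): a sparse element arriving as
            #     <item SOAP-ENC:position="[2]">x</item>
            # carries its index in the position attribute; _addItem below
            # parses that "[2]" form into curpos before storing the value.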
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _aslist(self, item=None):
if item is not None:
return self.data[int(item)]
else:
return self.data
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.data[int(item)]
else:
retval = {}
def fun(x): retval[str(x).encode(encoding)] = self.data[x]
map( fun, range(len(self.data)) )
return retval
def __getitem__(self, item):
try:
return self.data[int(item)]
except ValueError:
return getattr(self, item)
def __len__(self):
return len(self.data)
def __nonzero__(self):
return 1
def __str__(self):
return anyType.__str__(self) + ": " + str(self._aslist())
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
            a += [None] * (curpos[0] - len(a) + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
#self._full = 1
            #FIXME: why is this occurring?
pass
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
            a += [None] * (curpos[0] - len(a) + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
def __init__(self, data = None, name = None, typed = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):
arrayType.__init__(self, data, name, attrs, offset, rank, asize,
elemsname)
self._typed = 1
self._type = typed
self._complexType = complexType
class faultType(structType, Error):
def __init__(self, faultcode = "", faultstring = "", detail = None):
self.faultcode = faultcode
self.faultstring = faultstring
if detail != None:
self.detail = detail
structType.__init__(self, None, 0)
def _setDetail(self, detail = None):
if detail != None:
self.detail = detail
else:
try: del self.detail
except AttributeError: pass
def __repr__(self):
if getattr(self, 'detail', None) != None:
return "<Fault %s: %s: %s>" % (self.faultcode,
self.faultstring,
self.detail)
else:
return "<Fault %s: %s>" % (self.faultcode, self.faultstring)
__str__ = __repr__
def __call__(self):
return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
def __init__(self, code="", string="", detail=None):
self.value = ("SOAPpy SOAP Exception", code, string, detail)
self.code = code
self.string = string
self.detail = detail
def __str__(self):
return repr(self.value)
class RequiredHeaderMismatch(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MethodNotFound(Exception):
def __init__(self, value):
(val, detail) = value.split(":")
self.value = val
self.detail = detail
def __str__(self):
        return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class MethodFailed(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#######
# Convert complex SOAPpy objects to native python equivalents
#######
def simplify(object, level=0):
"""
Convert the SOAPpy objects and their contents to simple python types.
This function recursively converts the passed 'container' object,
and all public subobjects. (Private subobjects have names that
start with '_'.)
Conversions:
- faultType --> raise python exception
    - arrayType --> list
- compoundType --> dictionary
"""
if level > 10:
return object
if isinstance( object, faultType ):
if object.faultstring == "Required Header Misunderstood":
raise RequiredHeaderMismatch(object.detail)
elif object.faultstring == "Method Not Found":
raise MethodNotFound(object.detail)
elif object.faultstring == "Authorization Failed":
raise AuthorizationFailed(object.detail)
elif object.faultstring == "Method Failed":
raise MethodFailed(object.detail)
else:
se = SOAPException(object.faultcode, object.faultstring,
object.detail)
raise se
elif isinstance( object, arrayType ):
data = object._aslist()
for k in range(len(data)):
data[k] = simplify(data[k], level=level+1)
return data
elif isinstance( object, compoundType ) or isinstance(object, structType):
data = object._asdict()
for k in data.keys():
if isPublic(k):
data[k] = simplify(data[k], level=level+1)
return data
elif type(object)==DictType:
for k in object.keys():
if isPublic(k):
object[k] = simplify(object[k])
return object
elif type(object)==list:
for k in range(len(object)):
object[k] = simplify(object[k])
return object
else:
return object
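# Illustrative usage (added; the call and response shape below are
# assumptions):
#
#     result = server.someCall()   # e.g. a structType decoded from a reply
#     plain = simplify(result)     # -> {'status': 'ok', 'ids': [1, 2]}
#
# Passing a faultType raises the mapped Python exception instead of returning.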
def simplify_contents(object, level=0):
"""
Convert the contents of SOAPpy objects to simple python types.
This function recursively converts the sub-objects contained in a
'container' object to simple python types.
Conversions:
- faultType --> raise python exception
    - arrayType --> list
- compoundType --> dictionary
"""
if level>10: return object
if isinstance( object, faultType ):
for k in object._keys():
if isPublic(k):
setattr(object, k, simplify(object[k], level=level+1))
raise object
elif isinstance( object, arrayType ):
data = object._aslist()
for k in range(len(data)):
object[k] = simplify(data[k], level=level+1)
elif isinstance(object, structType):
data = object._asdict()
for k in data.keys():
if isPublic(k):
setattr(object, k, simplify(data[k], level=level+1))
elif isinstance( object, compoundType ) :
data = object._asdict()
for k in data.keys():
if isPublic(k):
object[k] = simplify(data[k], level=level+1)
elif type(object)==DictType:
for k in object.keys():
if isPublic(k):
object[k] = simplify(object[k])
elif type(object)==list:
for k in range(len(object)):
object[k] = simplify(object[k])
return object
|
chuckbasstan123/pyTorch_project | refs/heads/master | mnist_hogwild/train.py | 1 | import os
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
def train(rank, args, model):
torch.manual_seed(args.seed + rank)
for param in model.parameters():
# Break gradient sharing
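        # (cloning gives each worker its own gradient buffer; the parameter
        # tensors themselves stay in shared memory, so updates are Hogwild)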
param.grad.data = param.grad.data.clone()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, num_workers=1)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, num_workers=1)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train_epoch(epoch, args, model, train_loader, optimizer)
test_epoch(epoch, args, model, test_loader)
def train_epoch(epoch, args, model, data_loader, optimizer):
model.train()
pid = os.getpid()
samples_seen = 0
for batch_idx, (data, target) in enumerate(data_loader):
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('{}\tTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
pid, epoch, batch_idx * len(data), len(data_loader.dataset),
100. * batch_idx / len(data_loader), loss.data[0]))
def test_epoch(epoch, args, model, data_loader):
model.eval()
test_loss = 0
correct = 0
for data, target in data_loader:
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).cpu().sum()
    test_loss /= len(data_loader)  # loss function already averages over batch size
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(data_loader.dataset),
100. * correct / len(data_loader.dataset)))
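def _hogwild_launch_sketch(args, model, num_processes=2):
    # Sketch only: the real entry point for this example lives in main.py, and
    # this helper and its parameters are assumptions for illustration. It
    # shows how train() above is meant to be driven Hogwild-style: the model
    # is moved into shared memory so all workers update the same parameters
    # without locks, while each worker keeps private gradients (see the
    # clone() loop at the top of train()).
    import torch.multiprocessing as mp
    model.share_memory()
    workers = []
    for rank in range(num_processes):
        p = mp.Process(target=train, args=(rank, args, model))
        p.start()
        workers.append(p)
    for p in workers:
        p.join()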
|
Atheros1/PyBitmessage | refs/heads/master | src/bitmessageqt/newaddressdialog.py | 19 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'newaddressdialog.ui'
#
# Created: Sun Sep 15 23:53:31 2013
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_NewAddressDialog(object):
def setupUi(self, NewAddressDialog):
NewAddressDialog.setObjectName(_fromUtf8("NewAddressDialog"))
NewAddressDialog.resize(723, 704)
self.formLayout = QtGui.QFormLayout(NewAddressDialog)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label = QtGui.QLabel(NewAddressDialog)
self.label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(0, QtGui.QFormLayout.SpanningRole, self.label)
self.label_5 = QtGui.QLabel(NewAddressDialog)
self.label_5.setWordWrap(True)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.formLayout.setWidget(2, QtGui.QFormLayout.SpanningRole, self.label_5)
self.line = QtGui.QFrame(NewAddressDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.line.sizePolicy().hasHeightForWidth())
self.line.setSizePolicy(sizePolicy)
self.line.setMinimumSize(QtCore.QSize(100, 2))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.formLayout.setWidget(4, QtGui.QFormLayout.SpanningRole, self.line)
self.radioButtonRandomAddress = QtGui.QRadioButton(NewAddressDialog)
self.radioButtonRandomAddress.setChecked(True)
self.radioButtonRandomAddress.setObjectName(_fromUtf8("radioButtonRandomAddress"))
self.buttonGroup = QtGui.QButtonGroup(NewAddressDialog)
self.buttonGroup.setObjectName(_fromUtf8("buttonGroup"))
self.buttonGroup.addButton(self.radioButtonRandomAddress)
self.formLayout.setWidget(5, QtGui.QFormLayout.SpanningRole, self.radioButtonRandomAddress)
self.radioButtonDeterministicAddress = QtGui.QRadioButton(NewAddressDialog)
self.radioButtonDeterministicAddress.setObjectName(_fromUtf8("radioButtonDeterministicAddress"))
self.buttonGroup.addButton(self.radioButtonDeterministicAddress)
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.radioButtonDeterministicAddress)
self.checkBoxEighteenByteRipe = QtGui.QCheckBox(NewAddressDialog)
self.checkBoxEighteenByteRipe.setObjectName(_fromUtf8("checkBoxEighteenByteRipe"))
self.formLayout.setWidget(9, QtGui.QFormLayout.SpanningRole, self.checkBoxEighteenByteRipe)
self.groupBoxDeterministic = QtGui.QGroupBox(NewAddressDialog)
self.groupBoxDeterministic.setObjectName(_fromUtf8("groupBoxDeterministic"))
self.gridLayout = QtGui.QGridLayout(self.groupBoxDeterministic)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_9 = QtGui.QLabel(self.groupBoxDeterministic)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout.addWidget(self.label_9, 6, 0, 1, 1)
self.label_8 = QtGui.QLabel(self.groupBoxDeterministic)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 5, 0, 1, 3)
self.spinBoxNumberOfAddressesToMake = QtGui.QSpinBox(self.groupBoxDeterministic)
self.spinBoxNumberOfAddressesToMake.setMinimum(1)
self.spinBoxNumberOfAddressesToMake.setProperty("value", 8)
self.spinBoxNumberOfAddressesToMake.setObjectName(_fromUtf8("spinBoxNumberOfAddressesToMake"))
self.gridLayout.addWidget(self.spinBoxNumberOfAddressesToMake, 4, 3, 1, 1)
self.label_6 = QtGui.QLabel(self.groupBoxDeterministic)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 0, 0, 1, 1)
self.label_11 = QtGui.QLabel(self.groupBoxDeterministic)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout.addWidget(self.label_11, 4, 0, 1, 3)
spacerItem = QtGui.QSpacerItem(73, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 6, 1, 1, 1)
self.label_10 = QtGui.QLabel(self.groupBoxDeterministic)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout.addWidget(self.label_10, 6, 2, 1, 1)
spacerItem1 = QtGui.QSpacerItem(42, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 6, 3, 1, 1)
self.label_7 = QtGui.QLabel(self.groupBoxDeterministic)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 2, 0, 1, 1)
self.lineEditPassphraseAgain = QtGui.QLineEdit(self.groupBoxDeterministic)
self.lineEditPassphraseAgain.setEchoMode(QtGui.QLineEdit.Password)
self.lineEditPassphraseAgain.setObjectName(_fromUtf8("lineEditPassphraseAgain"))
self.gridLayout.addWidget(self.lineEditPassphraseAgain, 3, 0, 1, 4)
self.lineEditPassphrase = QtGui.QLineEdit(self.groupBoxDeterministic)
self.lineEditPassphrase.setInputMethodHints(QtCore.Qt.ImhHiddenText|QtCore.Qt.ImhNoAutoUppercase|QtCore.Qt.ImhNoPredictiveText)
self.lineEditPassphrase.setEchoMode(QtGui.QLineEdit.Password)
self.lineEditPassphrase.setObjectName(_fromUtf8("lineEditPassphrase"))
self.gridLayout.addWidget(self.lineEditPassphrase, 1, 0, 1, 4)
self.formLayout.setWidget(8, QtGui.QFormLayout.LabelRole, self.groupBoxDeterministic)
self.groupBox = QtGui.QGroupBox(NewAddressDialog)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 2)
self.newaddresslabel = QtGui.QLineEdit(self.groupBox)
self.newaddresslabel.setObjectName(_fromUtf8("newaddresslabel"))
self.gridLayout_2.addWidget(self.newaddresslabel, 1, 0, 1, 2)
self.radioButtonMostAvailable = QtGui.QRadioButton(self.groupBox)
self.radioButtonMostAvailable.setChecked(True)
self.radioButtonMostAvailable.setObjectName(_fromUtf8("radioButtonMostAvailable"))
self.gridLayout_2.addWidget(self.radioButtonMostAvailable, 2, 0, 1, 2)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 3, 1, 1, 1)
self.radioButtonExisting = QtGui.QRadioButton(self.groupBox)
self.radioButtonExisting.setChecked(False)
self.radioButtonExisting.setObjectName(_fromUtf8("radioButtonExisting"))
self.gridLayout_2.addWidget(self.radioButtonExisting, 4, 0, 1, 2)
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_2.addWidget(self.label_4, 5, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(13, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem2, 6, 0, 1, 1)
self.comboBoxExisting = QtGui.QComboBox(self.groupBox)
self.comboBoxExisting.setEnabled(False)
self.comboBoxExisting.setEditable(True)
self.comboBoxExisting.setObjectName(_fromUtf8("comboBoxExisting"))
self.gridLayout_2.addWidget(self.comboBoxExisting, 6, 1, 1, 1)
self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.groupBox)
self.buttonBox = QtGui.QDialogButtonBox(NewAddressDialog)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
self.buttonBox.setSizePolicy(sizePolicy)
self.buttonBox.setMinimumSize(QtCore.QSize(160, 0))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.formLayout.setWidget(10, QtGui.QFormLayout.SpanningRole, self.buttonBox)
self.retranslateUi(NewAddressDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), NewAddressDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), NewAddressDialog.reject)
QtCore.QObject.connect(self.radioButtonExisting, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.comboBoxExisting.setEnabled)
QtCore.QObject.connect(self.radioButtonDeterministicAddress, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.groupBoxDeterministic.setShown)
QtCore.QObject.connect(self.radioButtonRandomAddress, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.groupBox.setShown)
QtCore.QMetaObject.connectSlotsByName(NewAddressDialog)
NewAddressDialog.setTabOrder(self.radioButtonRandomAddress, self.radioButtonDeterministicAddress)
NewAddressDialog.setTabOrder(self.radioButtonDeterministicAddress, self.newaddresslabel)
NewAddressDialog.setTabOrder(self.newaddresslabel, self.radioButtonMostAvailable)
NewAddressDialog.setTabOrder(self.radioButtonMostAvailable, self.radioButtonExisting)
NewAddressDialog.setTabOrder(self.radioButtonExisting, self.comboBoxExisting)
NewAddressDialog.setTabOrder(self.comboBoxExisting, self.lineEditPassphrase)
NewAddressDialog.setTabOrder(self.lineEditPassphrase, self.lineEditPassphraseAgain)
NewAddressDialog.setTabOrder(self.lineEditPassphraseAgain, self.spinBoxNumberOfAddressesToMake)
NewAddressDialog.setTabOrder(self.spinBoxNumberOfAddressesToMake, self.checkBoxEighteenByteRipe)
NewAddressDialog.setTabOrder(self.checkBoxEighteenByteRipe, self.buttonBox)
def retranslateUi(self, NewAddressDialog):
NewAddressDialog.setWindowTitle(_translate("NewAddressDialog", "Create new Address", None))
self.label.setText(_translate("NewAddressDialog", "Here you may generate as many addresses as you like. Indeed, creating and abandoning addresses is encouraged. You may generate addresses by using either random numbers or by using a passphrase. If you use a passphrase, the address is called a \"deterministic\" address.\n"
"The \'Random Number\' option is selected by default but deterministic addresses have several pros and cons:", None))
        self.label_5.setText(_translate("NewAddressDialog", "<html><head/><body><p><span style=\" font-weight:600;\">Pros:<br/></span>You can recreate your addresses on any computer from memory. <br/>You need not worry about backing up your keys.dat file as long as you can remember your passphrase. <br/><span style=\" font-weight:600;\">Cons:<br/></span>You must remember (or write down) your passphrase if you expect to be able to recreate your keys if they are lost. <br/>You must remember the address version number and the stream number along with your passphrase. <br/>If you choose a weak passphrase and someone on the Internet can brute-force it, they can read your messages and send messages as you.</p></body></html>", None))
self.radioButtonRandomAddress.setText(_translate("NewAddressDialog", "Use a random number generator to make an address", None))
self.radioButtonDeterministicAddress.setText(_translate("NewAddressDialog", "Use a passphrase to make addresses", None))
self.checkBoxEighteenByteRipe.setText(_translate("NewAddressDialog", "Spend several minutes of extra computing time to make the address(es) 1 or 2 characters shorter", None))
self.groupBoxDeterministic.setTitle(_translate("NewAddressDialog", "Make deterministic addresses", None))
self.label_9.setText(_translate("NewAddressDialog", "Address version number: 4", None))
self.label_8.setText(_translate("NewAddressDialog", "In addition to your passphrase, you must remember these numbers:", None))
self.label_6.setText(_translate("NewAddressDialog", "Passphrase", None))
self.label_11.setText(_translate("NewAddressDialog", "Number of addresses to make based on your passphrase:", None))
self.label_10.setText(_translate("NewAddressDialog", "Stream number: 1", None))
self.label_7.setText(_translate("NewAddressDialog", "Retype passphrase", None))
self.groupBox.setTitle(_translate("NewAddressDialog", "Randomly generate address", None))
self.label_2.setText(_translate("NewAddressDialog", "Label (not shown to anyone except you)", None))
self.radioButtonMostAvailable.setText(_translate("NewAddressDialog", "Use the most available stream", None))
self.label_3.setText(_translate("NewAddressDialog", " (best if this is the first of many addresses you will create)", None))
self.radioButtonExisting.setText(_translate("NewAddressDialog", "Use the same stream as an existing address", None))
self.label_4.setText(_translate("NewAddressDialog", "(saves you some bandwidth and processing power)", None))
|
jtattermusch/grpc | refs/heads/master | tools/run_tests/lb_interop_tests/gen_build_yaml.py | 13 | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate JSON data for LB interop test scenarios."""
import json
import os
import yaml
all_scenarios = []
# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
# client_falls_back_because_no_backends_* scenarios for Java/Go.
# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
# *short_stream* scenarios for Java.
# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
# Java TLS tests involving TLS to the balancer.
def server_sec(transport_sec):
    """Map a client transport security setting to the (balancer, backend, fallback) server settings."""
if transport_sec == 'google_default_credentials':
return 'alts', 'alts', 'tls'
return transport_sec, transport_sec, transport_sec
def generate_no_balancer_because_lb_a_record_returns_nx_domain():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_nx_domain_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
def generate_no_balancer_because_lb_a_record_returns_no_data():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_no_data_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
True,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
def generate_client_referred_to_backend():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend()
def generate_client_referred_to_backend_fallback_broken():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_fallback_broken_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [{
'transport_sec': 'insecure',
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_fallback_broken()
def generate_client_referred_to_backend_multiple_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_backends_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_backends()
def generate_client_falls_back_because_no_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = ['go', 'java']
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_falls_back_because_no_backends_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_no_backends()
def generate_client_falls_back_because_balancer_connection_broken():
all_configs = []
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs = ['java']
config = {
'name':
'client_falls_back_because_balancer_connection_broken_%s' %
transport_sec,
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': 'insecure',
'short_stream': False,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_balancer_connection_broken()
def generate_client_referred_to_backend_multiple_balancers():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_balancers_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
],
'backend_configs': [{
'transport_sec': backend_sec,
},],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_balancers()
print(yaml.dump({
'lb_interop_test_scenarios': all_scenarios,
}))
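# For orientation, a sketch of the emitted YAML for one scenario (field values
# follow the config dicts built above; key ordering depends on yaml.dump):
#
# lb_interop_test_scenarios:
# - name: no_balancer_because_lb_a_record_returns_nx_domain_insecure
#   skip_langs: []
#   transport_sec: insecure
#   balancer_configs: []
#   backend_configs: []
#   fallback_configs:
#   - transport_sec: insecure
#   cause_no_error_no_data_for_balancer_a_record: false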
|
rchand31/django-haystack | refs/heads/master | haystack/__init__.py | 3 | import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core import signals
from haystack.constants import DEFAULT_ALIAS
from haystack.utils import loading
__author__ = 'Daniel Lindsley'
__version__ = (2, 0, 0, 'alpha')
__all__ = ['backend']
# Setup default logging.
log = logging.getLogger('haystack')
stream = logging.StreamHandler()
stream.setLevel(logging.INFO)
log.addHandler(stream)
# Help people clean up from 1.X.
if hasattr(settings, 'HAYSTACK_SITECONF'):
raise ImproperlyConfigured('The HAYSTACK_SITECONF setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_SEARCH_ENGINE'):
raise ImproperlyConfigured('The HAYSTACK_SEARCH_ENGINE setting has been replaced with HAYSTACK_CONNECTIONS.')
if hasattr(settings, 'HAYSTACK_ENABLE_REGISTRATIONS'):
raise ImproperlyConfigured('The HAYSTACK_ENABLE_REGISTRATIONS setting is no longer used & can be removed.')
if hasattr(settings, 'HAYSTACK_INCLUDE_SPELLING'):
raise ImproperlyConfigured('The HAYSTACK_INCLUDE_SPELLING setting is now a per-backend setting & belongs in HAYSTACK_CONNECTIONS.')
# Check the 2.X+ bits.
if not hasattr(settings, 'HAYSTACK_CONNECTIONS'):
raise ImproperlyConfigured('The HAYSTACK_CONNECTIONS setting is required.')
if DEFAULT_ALIAS not in settings.HAYSTACK_CONNECTIONS:
raise ImproperlyConfigured("The default alias '%s' must be included in the HAYSTACK_CONNECTIONS setting." % DEFAULT_ALIAS)
# Load the connections.
connections = loading.ConnectionHandler(settings.HAYSTACK_CONNECTIONS)
# Load the router(s).
connection_router = loading.ConnectionRouter()
if hasattr(settings, 'HAYSTACK_ROUTERS'):
if not isinstance(settings.HAYSTACK_ROUTERS, (list, tuple)):
raise ImproperlyConfigured("The HAYSTACK_ROUTERS setting must be either a list or tuple.")
connection_router = loading.ConnectionRouter(settings.HAYSTACK_ROUTERS)
# Per-request, reset the in-memory query log.
# Probably not extraordinarily thread-safe but should only matter when
# DEBUG = True.
def reset_search_queries(**kwargs):
for conn in connections.all():
conn.reset_queries()
if settings.DEBUG:
signals.request_started.connect(reset_search_queries)
|
anntzer/scikit-learn | refs/heads/main | examples/linear_model/plot_bayesian_ridge.py | 43 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for Bayesian Ridge Regression
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
# #############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
# #############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
# #############################################################################
# Plot true weights, estimated weights, histogram of the weights, and
# predictions with standard deviations
lw = 2
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='lightgreen', linewidth=lw,
label="Bayesian Ridge estimate")
plt.plot(w, color='gold', linewidth=lw, label="Ground truth")
plt.plot(ols.coef_, color='navy', linestyle='--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='gold', log=True,
edgecolor='black')
plt.scatter(clf.coef_[relevant_features], np.full(len(relevant_features), 5.),
color='navy', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="upper left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=lw)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=0.1)
clf_poly = BayesianRidge()
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial Bayesian Ridge Regression", linewidth=lw)
plt.plot(X_plot, y_plot, color='gold', linewidth=lw,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
|
TsinghuaX/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/progress.py | 127 | '''
Progress class for modules. Represents where a student is in a module.
Useful things to know:
- Use Progress.to_js_status_str() to convert a progress into a simple
status string to pass to js.
- Use Progress.to_js_detail_str() to convert a progress into a more detailed
string to pass to js.
In particular, these functions have a canonical handing of None.
For most subclassing needs, you should only need to reimplement
frac() and __str__().
'''
import numbers
class Progress(object):
'''Represents a progress of a/b (a out of b done)
a and b must be numeric, but not necessarily integer, with
0 <= a <= b and b > 0.
Progress can only represent Progress for modules where that makes sense. Other
modules (e.g. html) should return None from get_progress().
TODO: add tag for module type? Would allow for smarter merging.
'''
def __init__(self, a, b):
'''Construct a Progress object. a and b must be numbers, and must have
0 <= a <= b and b > 0
'''
# Want to do all checking at construction time, so explicitly check types
if not (isinstance(a, numbers.Number) and
isinstance(b, numbers.Number)):
raise TypeError('a and b must be numbers. Passed {0}/{1}'.format(a, b))
        # Out-of-range values of a are clamped rather than rejected:
        # a > b is treated as complete progress, a < 0 as no progress.
        if a > b:
            a = b
        if a < 0:
            a = 0
if b <= 0:
raise ValueError('fraction a/b = {0}/{1} must have b > 0'.format(a, b))
self._a = a
self._b = b
def frac(self):
''' Return tuple (a,b) representing progress of a/b'''
return (self._a, self._b)
def percent(self):
''' Returns a percentage progress as a float between 0 and 100.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return 100.0 * a / b
def started(self):
''' Returns True if fractional progress is greater than 0.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
return self.frac()[0] > 0
def inprogress(self):
''' Returns True if fractional progress is strictly between 0 and 1.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a > 0 and a < b
def done(self):
''' Return True if this represents done.
subclassing note: implemented in terms of frac(), assumes sanity
checking is done at construction time.
'''
(a, b) = self.frac()
return a == b
def ternary_str(self):
''' Return a string version of this progress: either
"none", "in_progress", or "done".
subclassing note: implemented in terms of frac()
'''
(a, b) = self.frac()
if a == 0:
return "none"
if a < b:
return "in_progress"
return "done"
def __eq__(self, other):
''' Two Progress objects are equal if they have identical values.
Implemented in terms of frac()'''
if not isinstance(other, Progress):
return False
(a, b) = self.frac()
(a2, b2) = other.frac()
return a == a2 and b == b2
def __ne__(self, other):
''' The opposite of equal'''
return not self.__eq__(other)
def __str__(self):
        ''' Return a string representation of this progress.
subclassing note: implemented in terms of frac().
'''
(a, b) = self.frac()
return "{0}/{1}".format(a, b)
@staticmethod
def add_counts(a, b):
'''Add two progress indicators, assuming that each represents items done:
(a / b) + (c / d) = (a + c) / (b + d).
If either is None, returns the other.
'''
if a is None:
return b
if b is None:
return a
# get numerators + denominators
(n, d) = a.frac()
(n2, d2) = b.frac()
return Progress(n + n2, d + d2)
@staticmethod
def to_js_status_str(progress):
'''
Return the "status string" version of the passed Progress
object that should be passed to js. Use this function when
sending Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return progress.ternary_str()
@staticmethod
def to_js_detail_str(progress):
'''
Return the "detail string" version of the passed Progress
object that should be passed to js. Use this function when
passing Progress objects to js to limit dependencies.
'''
if progress is None:
return "0"
return str(progress)
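# Illustrative usage (a sketch, not part of the module):
#
#     total = Progress.add_counts(Progress(1, 2), Progress(3, 5))
#     assert total.frac() == (4, 7)
#     assert Progress.to_js_detail_str(total) == "4/7"
#     assert Progress.to_js_status_str(total) == "in_progress"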
|
ipld/py-cid | refs/heads/master | cid/cid.py | 1 | # -*- coding: utf-8 -*-
import base58
import multibase
import multihash as mh
from morphys import ensure_bytes, ensure_unicode
import multicodec
class BaseCID(object):
__hash__ = object.__hash__
def __init__(self, version, codec, multihash):
"""
Creates a new CID object. This class should not be used directly, use :py:class:`cid.cid.CIDv0` or
:py:class:`cid.cid.CIDv1` instead.
:param int version: CID version (0 or 1)
:param str codec: codec to be used for encoding the hash
:param str multihash: the multihash
"""
self._version = version
self._codec = codec
self._multihash = ensure_bytes(multihash)
@property
def version(self):
""" CID version """
return self._version
@property
def codec(self):
""" CID codec """
return self._codec
@property
def multihash(self):
""" CID multihash """
return self._multihash
@property
def buffer(self):
raise NotImplementedError
def encode(self, *args, **kwargs):
raise NotImplementedError
def __repr__(self):
def truncate(s, length):
return s[:length] + b'..' if len(s) > length else s
truncate_length = 20
return '{class_}(version={version}, codec={codec}, multihash={multihash})'.format(
class_=self.__class__.__name__,
version=self._version,
codec=self._codec,
multihash=truncate(self._multihash, truncate_length),
)
def __str__(self):
return ensure_unicode(self.encode())
def __eq__(self, other):
return (self.version == other.version) and (self.codec == other.codec) and (self.multihash == other.multihash)
class CIDv0(BaseCID):
""" CID version 0 object """
CODEC = 'dag-pb'
def __init__(self, multihash):
"""
:param bytes multihash: multihash for the CID
"""
super(CIDv0, self).__init__(0, self.CODEC, multihash)
@property
def buffer(self):
"""
The raw representation that will be encoded.
:return: the multihash
:rtype: bytes
"""
return self.multihash
def encode(self):
"""
base58-encoded buffer
:return: encoded representation or CID
:rtype: bytes
"""
return ensure_bytes(base58.b58encode(self.buffer))
def to_v1(self):
"""
Get an equivalent :py:class:`cid.CIDv1` object.
:return: :py:class:`cid.CIDv1` object
:rtype: :py:class:`cid.CIDv1`
"""
return CIDv1(self.CODEC, self.multihash)
class CIDv1(BaseCID):
""" CID version 1 object """
def __init__(self, codec, multihash):
super(CIDv1, self).__init__(1, codec, multihash)
@property
def buffer(self):
"""
The raw representation of the CID
:return: raw representation of the CID
:rtype: bytes
"""
return b''.join([bytes([self.version]), multicodec.add_prefix(self.codec, self.multihash)])
def encode(self, encoding='base58btc'):
"""
Encoded version of the raw representation
:param str encoding: the encoding to use to encode the raw representation, should be supported by
``py-multibase``
:return: encoded raw representation with the given encoding
:rtype: bytes
"""
return multibase.encode(encoding, self.buffer)
def to_v0(self):
"""
Get an equivalent :py:class:`cid.CIDv0` object.
:return: :py:class:`cid.CIDv0` object
:rtype: :py:class:`cid.CIDv0`
:raise ValueError: if the codec is not 'dag-pb'
"""
if self.codec != CIDv0.CODEC:
            raise ValueError('CIDv1 can only be converted to CIDv0 for codec {}'.format(CIDv0.CODEC))
return CIDv0(self.multihash)
def make_cid(*args):
"""
Creates a :py:class:`cid.CIDv0` or :py:class:`cid.CIDv1` object based on the given parameters
The function supports the following signatures:
make_cid(<base58 encoded multihash CID>) -> CIDv0
make_cid(<multihash CID>) -> CIDv0
make_cid(<multibase encoded multihash CID>) -> CIDv1
make_cid(<version>, <codec>, <multihash>) -> CIDv1
:param args:
- base58-encoded multihash (str or bytes)
- multihash (str or bytes)
- multibase-encoded multihash (str or bytes)
- version:int, codec(str), multihash(str or bytes)
:returns: the respective CID object
:rtype: :py:class:`cid.CIDv0` or :py:class:`cid.CIDv1`
:raises ValueError: if the number of arguments is not 1 or 3
    :raises ValueError: if the only argument passed is not a ``str`` or ``bytes``
:raises ValueError: if the string provided is not a valid base58 encoded hash
:raises ValueError: if 3 arguments are passed and version is not 0 or 1
:raises ValueError: if 3 arguments are passed and the ``codec`` is not supported by ``multicodec``
:raises ValueError: if 3 arguments are passed and the ``multihash`` is not ``str`` or ``byte``
:raises ValueError: if 3 arguments are passed with version 0 and codec is not *dag-pb*
"""
if len(args) == 1:
data = args[0]
if isinstance(data, str):
return from_string(data)
elif isinstance(data, bytes):
return from_bytes(data)
else:
            raise ValueError('invalid argument passed, expected: str or bytes, found: {}'.format(type(data)))
elif len(args) == 3:
version, codec, multihash = args
if version not in (0, 1):
raise ValueError('version should be 0 or 1, {} was provided'.format(version))
if not multicodec.is_codec(codec):
raise ValueError('invalid codec {} provided, please check'.format(codec))
if not (isinstance(multihash, str) or isinstance(multihash, bytes)):
raise ValueError('invalid type for multihash provided, should be str or bytes')
if version == 0:
if codec != CIDv0.CODEC:
raise ValueError('codec for version 0 can only be {}, found: {}'.format(CIDv0.CODEC, codec))
return CIDv0(multihash)
else:
return CIDv1(codec, multihash)
else:
raise ValueError('invalid number of arguments, expected 1 or 3')
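# Illustrative usage (a sketch; the base58 string below is a sample CIDv0
# value and is only an assumption here):
#
#     cid = make_cid('QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n')
#     assert cid.version == 0 and cid.codec == CIDv0.CODEC
#     assert make_cid(1, 'dag-pb', cid.multihash).version == 1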
def is_cid(cidstr):
"""
Checks if a given input string is valid encoded CID or not.
It takes same input as `cid.make_cid` method with a single argument
:param cidstr: input string which can be a
- base58-encoded multihash
- multihash
- multibase-encoded multihash
:type cidstr: str or bytes
:return: if the value is a valid CID or not
:rtype: bool
"""
try:
return bool(make_cid(cidstr))
except ValueError:
return False
def from_string(cidstr):
"""
    Creates a CID object from an encoded form
:param str cidstr: can be
- base58-encoded multihash
- multihash
- multibase-encoded multihash
:return: a CID object
:rtype: :py:class:`cid.CIDv0` or :py:class:`cid.CIDv1`
"""
cidbytes = ensure_bytes(cidstr, 'utf-8')
return from_bytes(cidbytes)
def from_bytes(cidbytes):
"""
    Creates a CID object from an encoded form
:param bytes cidbytes: can be
- base58-encoded multihash
- multihash
- multibase-encoded multihash
:return: a CID object
:rtype: :py:class:`cid.CIDv0` or :py:class:`cid.CIDv1`
:raises: `ValueError` if the base58-encoded string is not a valid string
    :raises: `ValueError` if the length of the argument is less than 2
:raises: `ValueError` if the length of decoded CID is invalid
"""
if len(cidbytes) < 2:
        raise ValueError('argument length must be at least 2')
# first byte for identity multibase and CIDv0 is 0x00
# putting in assumption that multibase for CIDv0 can not be identity
# refer: https://github.com/ipld/cid/issues/13#issuecomment-326490275
if cidbytes[0] != 0 and multibase.is_encoded(cidbytes):
# if the bytestream is multibase encoded
cid = multibase.decode(cidbytes)
if len(cid) < 2:
raise ValueError('cid length is invalid')
data = cid[1:]
version = int(cid[0])
codec = multicodec.get_codec(data)
multihash = multicodec.remove_prefix(data)
elif cidbytes[0] in (0, 1):
# if the bytestream is a CID
version = cidbytes[0]
data = cidbytes[1:]
codec = multicodec.get_codec(data)
multihash = multicodec.remove_prefix(data)
else:
        # otherwise it's just a base58-encoded multihash
try:
version = 0
codec = CIDv0.CODEC
multihash = base58.b58decode(cidbytes)
except ValueError:
raise ValueError('multihash is not a valid base58 encoded multihash')
try:
mh.decode(multihash)
except ValueError:
raise
return make_cid(version, codec, multihash)
|
RPI-OPENEDX/edx-platform | refs/heads/RPI-DEV | openedx/core/djangoapps/credit/verification_access.py | 105 | """
Create in-course reverification access groups in a course.
We model the rules as a set of user partitions, one for each
verification checkpoint in a course.
For example, suppose that a course has two verification checkpoints,
one at midterm A and one at midterm B.
Then the user partitions would look like this:
Midterm A: |-- ALLOW --|-- DENY --|
Midterm B: |-- ALLOW --|-- DENY --|
where the groups are defined as:
* ALLOW: The user has access to content gated by the checkpoint.
* DENY: The user does not have access to content gated by the checkpoint.
"""
import logging
from util.db import generate_int_id
from openedx.core.djangoapps.credit.utils import get_course_blocks
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.partitions.partitions import Group, UserPartition
log = logging.getLogger(__name__)
VERIFICATION_SCHEME_NAME = "verification"
VERIFICATION_BLOCK_CATEGORY = "edx-reverification-block"
def update_verification_partitions(course_key):
"""
Create a user partition for each verification checkpoint in the course.
This will modify the published version of the course descriptor.
It ensures that any in-course reverification XBlocks in the course
have an associated user partition. Other user partitions (e.g. cohorts)
will be preserved. Partitions associated with deleted reverification checkpoints
will be marked as inactive and will not be used to restrict access.
Arguments:
course_key (CourseKey): identifier for the course.
Returns:
None
"""
# Batch all the queries we're about to do and suppress
# the "publish" signal to avoid an infinite call loop.
with modulestore().bulk_operations(course_key, emit_signals=False):
# Retrieve all in-course reverification blocks in the course
icrv_blocks = get_course_blocks(course_key, VERIFICATION_BLOCK_CATEGORY)
# Update the verification definitions in the course descriptor
# This will also clean out old verification partitions if checkpoints
# have been deleted.
_set_verification_partitions(course_key, icrv_blocks)
def _unique_partition_id(course):
"""Return a unique user partition ID for the course. """
# Exclude all previously used IDs, even for partitions that have been disabled
# (e.g. if the course author deleted an in-course reverifification block but
# there are courseware components that reference the disabled partition).
used_ids = set(p.id for p in course.user_partitions)
return generate_int_id(used_ids=used_ids)
def _other_partitions(verified_partitions, exclude_partitions, course_key):
"""
Retrieve all partitions NOT associated with the current set of ICRV blocks.
Any partition associated with a deleted ICRV block will be marked as inactive
so its access rules will no longer be enforced.
Arguments:
        verified_partitions (list of UserPartition): All verified partitions defined in the course.
exclude_partitions (list of UserPartition): Partitions to exclude (e.g. the ICRV partitions already added)
course_key (CourseKey): Identifier for the course (used for logging).
Returns: list of `UserPartition`s
"""
results = []
partition_by_id = {
p.id: p for p in verified_partitions
}
other_partition_ids = set(p.id for p in verified_partitions) - set(p.id for p in exclude_partitions)
for pid in other_partition_ids:
partition = partition_by_id[pid]
results.append(
UserPartition(
id=partition.id,
name=partition.name,
description=partition.description,
scheme=partition.scheme,
parameters=partition.parameters,
groups=partition.groups,
active=False,
)
)
log.info(
(
"Disabled partition %s in course %s because the "
"associated in-course-reverification checkpoint does not exist."
),
partition.id, course_key
)
return results
def _set_verification_partitions(course_key, icrv_blocks):
"""
Create or update user partitions in the course.
Ensures that each ICRV block in the course has an associated user partition
with the groups ALLOW and DENY.
Arguments:
course_key (CourseKey): Identifier for the course.
icrv_blocks (list of XBlock): In-course reverification blocks, e.g. reverification checkpoints.
Returns:
list of UserPartition
"""
scheme = UserPartition.get_scheme(VERIFICATION_SCHEME_NAME)
if scheme is None:
log.error("Could not retrieve user partition scheme with ID %s", VERIFICATION_SCHEME_NAME)
return []
course = modulestore().get_course(course_key)
if course is None:
log.error("Could not find course %s", course_key)
return []
verified_partitions = course.get_user_partitions_for_scheme(scheme)
partition_id_for_location = {
p.parameters["location"]: p.id
for p in verified_partitions
if "location" in p.parameters
}
partitions = []
for block in icrv_blocks:
partition = UserPartition(
id=partition_id_for_location.get(
unicode(block.location),
_unique_partition_id(course)
),
name=block.related_assessment,
description=u"Verification checkpoint at {}".format(block.related_assessment),
scheme=scheme,
parameters={"location": unicode(block.location)},
groups=[
Group(scheme.ALLOW, "Completed verification at {}".format(block.related_assessment)),
Group(scheme.DENY, "Did not complete verification at {}".format(block.related_assessment)),
]
)
partitions.append(partition)
log.info(
(
"Configured partition %s for course %s using a verified partition scheme "
"for the in-course-reverification checkpoint at location %s"
),
partition.id,
course_key,
partition.parameters["location"]
)
# Preserve existing, non-verified partitions from the course
# Mark partitions for deleted in-course reverification as disabled.
partitions += _other_partitions(verified_partitions, partitions, course_key)
course.set_user_partitions_for_scheme(partitions, scheme)
modulestore().update_item(course, ModuleStoreEnum.UserID.system)
log.info("Saved updated partitions for the course %s", course_key)
return partitions
|
ppiotr/Bibedit-some-refactoring | refs/heads/bibedit-hp-change-to-field-with-many-instances | modules/websubmit/web/publiline.py | 3 | ## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
publiline_complex.py -- implementes ...
actors in this process are:
1. author -- subilmts ...
2. edi
3; ref
Il ne faut pas oublier de definir les roles...
"""
__revision__ = "$Id$"
## import interesting modules:
import string
import os
import sys
import time
import types
import re
import shutil
from invenio.config import \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_SITE_ADMIN_EMAIL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_PYLIBDIR, \
CFG_WEBSUBMIT_STORAGEDIR, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_SECURE_URL, \
CFG_VERSION
from invenio.dbquery import run_sql, Error, OperationalError
from invenio.access_control_engine import acc_authorize_action
from invenio.access_control_admin import *
from invenio.webpage import page, create_error_box
from invenio.webuser import getUid, get_email, list_registered_users, page_not_authorized
from invenio.messages import gettext_set_language, wash_language
from invenio.websubmit_config import *
from invenio.search_engine import search_pattern, get_fieldvalues
from invenio.websubmit_functions.Retrieve_Data import Get_Field
from invenio.mailutils import send_email
from invenio.urlutils import wash_url_argument
from invenio.webgroup_dblayer import get_group_infos, insert_new_group, insert_new_member, delete_member
from invenio.webaccessadmin_lib import cleanstring_email
from invenio.access_control_config import MAXSELECTUSERS
from invenio.access_control_admin import acc_get_user_email
from invenio.webmessage import perform_request_send, perform_request_write_with_search
import invenio.webbasket_dblayer as basketdb
from invenio.webbasket_config import CFG_WEBBASKET_SHARE_LEVELS, CFG_WEBBASKET_CATEGORIES, CFG_WEBBASKET_SHARE_LEVELS_ORDERED
from invenio.errorlib import register_exception
from invenio.bibrecord import create_records, record_get_field_value, record_get_field_values
execfile("%s/invenio/websubmit_functions/Retrieve_Data.py" % CFG_PYLIBDIR)
import invenio.template
websubmit_templates = invenio.template.load('websubmit')
CFG_WEBSUBMIT_PENDING_DIR = "%s/pending" % CFG_WEBSUBMIT_STORAGEDIR
CFG_WEBSUBMIT_DUMMY_MARC_XML_REC = "dummy_marcxml_rec"
CFG_WEBSUBMIT_MARC_XML_REC = "recmysql"
def perform_request_save_comment(*args, **kwargs):
"""
FIXME: this function is a dummy workaround for the obsoleted
function calls below. Should get deleted at the same time as
them.
"""
return
def index(req,c=CFG_SITE_NAME,ln=CFG_SITE_LANG,doctype="",categ="",RN="",send="",flow="",apptype="", action="", email_user_pattern="", id_user="", id_user_remove="", validate="", id_user_val="", msg_subject="", msg_body="", reply="", commentId=""):
global uid
ln = wash_language(ln)
categ = wash_url_argument(categ, 'str')
RN = wash_url_argument(RN, 'str')
send = wash_url_argument(send, 'str')
flow = wash_url_argument(flow, 'str')
apptype = wash_url_argument(apptype, 'str')
action = wash_url_argument(action, 'str')
email_user_pattern = wash_url_argument(email_user_pattern, 'str')
id_user = wash_url_argument(id_user, 'int')
id_user_remove = wash_url_argument(id_user_remove, 'int')
validate = wash_url_argument(validate, 'str')
id_user_val = wash_url_argument(id_user_val, 'int')
msg_subject = wash_url_argument(msg_subject, 'str')
msg_body = wash_url_argument(msg_body, 'str')
reply = wash_url_argument(reply, 'str')
commentId = wash_url_argument(commentId, 'str')
# load the right message language
_ = gettext_set_language(ln)
t=""
# get user ID:
try:
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../publiline.py/index",
navmenuid='yourapprovals')
uid_email = get_email(uid)
except Error, e:
return errorMsg(str(e),req, ln = ln)
if flow == "cplx":
if doctype == "":
t = selectCplxDoctype(ln)
elif (categ == "") or (apptype == ""):
t = selectCplxCateg(doctype, ln)
elif RN == "":
t = selectCplxDocument(doctype, categ, apptype, ln)
elif action == "":
t = displayCplxDocument(req, doctype, categ, RN, apptype, reply, commentId, ln)
else:
t = doCplxAction(req, doctype, categ, RN, apptype, action, email_user_pattern, id_user, id_user_remove, validate, id_user_val, msg_subject, msg_body, reply, commentId, ln)
return page(title=_("Document Approval Workflow"),
navtrail= """<a class="navtrail" href="%(sitesecureurl)s/youraccount/display">%(account)s</a>""" % {
'sitesecureurl' : CFG_SITE_SECURE_URL,
'account' : _("Your Account"),
},
body=t,
description="",
keywords="",
uid=uid,
language=ln,
req=req,
navmenuid='yourapprovals')
else:
if doctype == "":
t = selectDoctype(ln)
elif categ == "":
t = selectCateg(doctype, ln)
elif RN == "":
t = selectDocument(doctype, categ, ln)
else:
t = displayDocument(req, doctype, categ, RN, send, ln)
return page(title=_("Approval and Refereeing Workflow"),
navtrail= """<a class="navtrail" href="%(sitesecureurl)s/youraccount/display">%(account)s</a>""" % {
'sitesecureurl' : CFG_SITE_SECURE_URL,
'account' : _("Your Account"),
},
body=t,
description="",
keywords="",
uid=uid,
language=ln,
req=req,
navmenuid='yourapprovals')
def selectDoctype(ln = CFG_SITE_LANG):
res = run_sql("select DISTINCT doctype from sbmAPPROVAL")
docs = []
for row in res:
res2 = run_sql("select ldocname from sbmDOCTYPE where sdocname=%s", (row[0],))
docs.append({
'doctype' : row[0],
'docname' : res2[0][0],
})
t = websubmit_templates.tmpl_publiline_selectdoctype(
ln = ln,
docs = docs,
)
return t
def selectCplxDoctype(ln = CFG_SITE_LANG):
res = run_sql("select DISTINCT doctype from sbmCPLXAPPROVAL")
docs = []
for row in res:
res2 = run_sql("select ldocname from sbmDOCTYPE where sdocname=%s", (row[0],))
docs.append({
'doctype' : row[0],
'docname' : res2[0][0],
})
t = websubmit_templates.tmpl_publiline_selectcplxdoctype(
ln = ln,
docs = docs,
)
return t
def selectCateg(doctype, ln = CFG_SITE_LANG):
t=""
res = run_sql("select ldocname from sbmDOCTYPE where sdocname=%s",(doctype,))
title = res[0][0]
sth = run_sql("select * from sbmCATEGORIES where doctype=%s order by lname",(doctype,))
if len(sth) == 0:
categ = "unknown"
return selectDocument(doctype,categ, ln = ln)
categories = []
for arr in sth:
waiting = 0
rejected = 0
approved = 0
sth2 = run_sql("select COUNT(*) from sbmAPPROVAL where doctype=%s and categ=%s and status='waiting'", (doctype,arr[1],))
waiting = sth2[0][0]
sth2 = run_sql("select COUNT(*) from sbmAPPROVAL where doctype=%s and categ=%s and status='approved'",(doctype,arr[1],))
approved = sth2[0][0]
sth2 = run_sql("select COUNT(*) from sbmAPPROVAL where doctype=%s and categ=%s and status='rejected'",(doctype,arr[1],))
rejected = sth2[0][0]
categories.append({
'waiting' : waiting,
'approved' : approved,
'rejected' : rejected,
'id' : arr[1],
})
t = websubmit_templates.tmpl_publiline_selectcateg(
ln = ln,
categories = categories,
doctype = doctype,
title = title,
)
return t
def selectCplxCateg(doctype, ln = CFG_SITE_LANG):
t=""
res = run_sql("SELECT ldocname FROM sbmDOCTYPE WHERE sdocname=%s",(doctype,))
title = res[0][0]
sth = run_sql("SELECT * FROM sbmCATEGORIES WHERE doctype=%s ORDER BY lname",(doctype,))
if len(sth) == 0:
categ = "unknown"
return selectCplxDocument(doctype,categ, "", ln = ln)
types = {}
for apptype in ('RRP', 'RPB', 'RDA'):
for arr in sth:
info = {'id' : arr[1],
'desc' : arr[2],}
for status in ('waiting', 'rejected', 'approved', 'cancelled'):
info[status] = __db_count_doc (doctype, arr[1], status, apptype)
types.setdefault (apptype, []).append(info)
t = websubmit_templates.tmpl_publiline_selectcplxcateg(
ln = ln,
types = types,
doctype = doctype,
title = title,
)
return t
def selectDocument(doctype,categ, ln = CFG_SITE_LANG):
t=""
res = run_sql("select ldocname from sbmDOCTYPE where sdocname=%s", (doctype,))
title = res[0][0]
if categ == "":
categ == "unknown"
docs = []
sth = run_sql("select rn,status from sbmAPPROVAL where doctype=%s and categ=%s order by status DESC,rn DESC",(doctype,categ))
for arr in sth:
docs.append({
'RN' : arr[0],
'status' : arr[1],
})
t = websubmit_templates.tmpl_publiline_selectdocument(
ln = ln,
doctype = doctype,
title = title,
categ = categ,
docs = docs,
)
return t
def selectCplxDocument(doctype,categ,apptype, ln = CFG_SITE_LANG):
t=""
res = run_sql("select ldocname from sbmDOCTYPE where sdocname=%s", (doctype,))
title = res[0][0]
sth = run_sql("select lname from sbmCATEGORIES where doctype=%s and sname=%s order by lname",(doctype,categ,))
if len(sth) != 0:
categname = sth[0][0]
else:
categname = "Unknown"
docs = []
sth = run_sql("select rn,status from sbmCPLXAPPROVAL where doctype=%s and categ=%s and type=%s order by status DESC,rn DESC",(doctype,categ,apptype))
for arr in sth:
docs.append({
'RN' : arr[0],
'status' : arr[1],
})
t = websubmit_templates.tmpl_publiline_selectcplxdocument(
ln = ln,
doctype = doctype,
title = title,
categ = categ,
categname = categname,
docs = docs,
apptype = apptype,
)
return t
def displayDocument(req, doctype,categ,RN,send, ln = CFG_SITE_LANG):
# load the right message language
_ = gettext_set_language(ln)
t=""
res = run_sql("select ldocname from sbmDOCTYPE where sdocname=%s", (doctype,))
docname = res[0][0]
if categ == "":
categ = "unknown"
sth = run_sql("select rn,status,dFirstReq,dLastReq,dAction,access,note from sbmAPPROVAL where rn=%s",(RN,))
if len(sth) > 0:
arr = sth[0]
rn = arr[0]
status = arr[1]
dFirstReq = arr[2]
dLastReq = arr[3]
dAction = arr[4]
access = arr[5]
note = arr[6]
else:
return _("Approval has never been requested for this document.") + "<br /> "
## Get the details of the pending item:
item_details = get_pending_item_details(doctype, RN)
## get_pending_item_details has returned either None or a dictionary
## with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
if item_details is not None:
authors = ", ".join(item_details['authors'])
newrn = item_details['report-number']
title = item_details['title']
sysno = item_details['recid']
else:
## FIXME!
        ## For backward compatibility reasons, if we failed to find the item's
## details, we will try the old way, which includes searching for files
## like TI, TIF in the submission's working directory.
## This is not nice and should be removed.
try:
(authors,title,sysno,newrn) = getInfo(doctype,categ,RN)
except TypeError:
return _("Unable to display document.")
confirm_send = 0
if send == _("Send Again"):
if authors == "unknown" or title == "unknown":
SendWarning(doctype,categ,RN,title,authors,access, ln = ln)
else:
# @todo - send in different languages
SendEnglish(doctype,categ,RN,title,authors,access,sysno)
run_sql("update sbmAPPROVAL set dLastReq=NOW() where rn=%s",(RN,))
confirm_send = 1
if status == "waiting":
if categ == "unknown":
## FIXME: This was necessary for document types without categories,
## such as DEMOBOO:
categ = "*"
(auth_code, auth_message) = acc_authorize_action(req, "referee",verbose=0,doctype=doctype, categ=categ)
else:
(auth_code, auth_message) = (None, None)
t = websubmit_templates.tmpl_publiline_displaydoc(
ln = ln,
docname = docname,
doctype = doctype,
categ = categ,
rn = rn,
status = status,
dFirstReq = dFirstReq,
dLastReq = dLastReq,
dAction = dAction,
access = access,
confirm_send = confirm_send,
auth_code = auth_code,
auth_message = auth_message,
authors = authors,
title = title,
sysno = sysno,
newrn = newrn,
note = note,
)
return t
def displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln = CFG_SITE_LANG):
# load the right message language
_ = gettext_set_language(ln)
t=""
uid = getUid(req)
res = run_sql("select ldocname from sbmDOCTYPE where sdocname=%s", (doctype,))
docname = res[0][0]
if categ == "":
categ = "unknown"
key = (RN, apptype)
infos = __db_get_infos (key)
if len(infos) > 0:
(status, id_group, id_bskBASKET, id_EdBoardGroup,
dFirstReq,dLastReq,dEdBoardSel, dRefereeSel, dRefereeRecom, dEdBoardRecom, dPubComRecom, dProjectLeaderAction) = infos[0]
dates = {'dFirstReq' : dFirstReq,
'dLastReq' : dLastReq,
'dEdBoardSel' : dEdBoardSel,
'dRefereeSel' : dRefereeSel,
'dRefereeRecom' : dRefereeRecom,
'dEdBoardRecom' : dEdBoardRecom,
'dPubComRecom' : dPubComRecom,
'dProjectLeaderAction' : dProjectLeaderAction,
}
else:
return _("Approval has never been requested for this document.") + "<br /> "
## Removing call to deprecated "getInAlice" function and replacing it with
## a call to the newer "get_brief_doc_details_from_repository" function:
## try:
## (authors,title,sysno,newrn) = getInAlice(doctype,categ,RN)
## except TypeError:
## return _("Unable to display document.")
item_details = get_brief_doc_details_from_repository(RN)
## get_brief_doc_details_from_repository has returned either None
## or a dictionary with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
if item_details is not None:
## Details of the item were found in the CDS Invenio repository
authors = ", ".join(item_details['authors'])
newrn = item_details['report-number']
title = item_details['title']
sysno = item_details['recid']
else:
## Can't find any document details.
return _("Unable to display document.")
if status == "waiting":
isPubCom = __is_PubCom (req, doctype)
isEdBoard = __is_EdBoard (uid, id_EdBoardGroup)
isReferee = __is_Referee (uid, id_bskBASKET)
isProjectLeader = __is_ProjectLeader (req, doctype, categ)
isAuthor = __is_Author (uid, sysno)
else:
isPubCom = None
isEdBoard = None
isReferee = None
isProjectLeader = None
isAuthor = None
t += websubmit_templates.tmpl_publiline_displaycplxdoc(
ln = ln,
docname = docname,
doctype = doctype,
categ = categ,
rn = RN,
apptype = apptype,
status = status,
dates = dates,
isPubCom = isPubCom,
isEdBoard = isEdBoard,
isReferee = isReferee,
isProjectLeader = isProjectLeader,
isAuthor = isAuthor,
authors = authors,
title = title,
sysno = sysno,
newrn = newrn,
)
if id_bskBASKET > 0:
rights = basketdb.get_max_user_rights_on_basket(uid, id_bskBASKET)
if not(__check_basket_sufficient_rights(rights, CFG_WEBBASKET_SHARE_LEVELS['READITM'])):
return t
comments = basketdb.get_comments(id_bskBASKET, sysno)
if dProjectLeaderAction != None:
user_can_add_comment = 0
else:
user_can_add_comment = __check_basket_sufficient_rights(rights, CFG_WEBBASKET_SHARE_LEVELS['ADDCMT'])
comment_subject = ""
comment_body = ""
if reply == "true":
#Get the message subject and body from the comment
for comment in comments:
if str(commentId) == str(comment[0]):
comment_subject = comment[2]
comment_body = comment[3]
            # str.lstrip("Re: ") would strip any of the characters 'R', 'e',
            # ':' and ' ', so remove the "Re: " prefix explicitly instead.
            if comment_subject.startswith("Re: "):
                comment_subject = comment_subject[len("Re: "):]
            comment_subject = "Re: " + comment_subject
comment_body = "> " + comment_body.replace("\n", "\n> ")
t += websubmit_templates.tmpl_publiline_displaycplxdocitem(
doctype, categ, RN, apptype, "AddComment",
comments,
(__check_basket_sufficient_rights(rights, CFG_WEBBASKET_SHARE_LEVELS['READCMT']),
user_can_add_comment,
__check_basket_sufficient_rights(rights, CFG_WEBBASKET_SHARE_LEVELS['DELCMT'])),
selected_category=CFG_WEBBASKET_CATEGORIES['GROUP'], selected_topic=0, selected_group_id=id_group,
comment_subject=comment_subject, comment_body=comment_body, ln=ln)
return t
def __check_basket_sufficient_rights(rights_user_has, rights_needed):
"""Private function, check if the rights are sufficient."""
try:
out = CFG_WEBBASKET_SHARE_LEVELS_ORDERED.index(rights_user_has) >= \
CFG_WEBBASKET_SHARE_LEVELS_ORDERED.index(rights_needed)
except ValueError:
out = 0
return out
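# Illustrative call (a sketch; the actual ordering lives in
# CFG_WEBBASKET_SHARE_LEVELS_ORDERED, so the outcome below is an assumption):
#
#     can_read = __check_basket_sufficient_rights(
#         CFG_WEBBASKET_SHARE_LEVELS['ADDCMT'],   # rights the user has
#         CFG_WEBBASKET_SHARE_LEVELS['READCMT'])  # rights needed
#     # returns 1 when the user's level is at or above the needed level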
def __is_PubCom (req,doctype):
(isPubCom, auth_message) = acc_authorize_action(req, "pubcomchair",verbose=0,doctype=doctype)
return isPubCom
def __is_EdBoard (uid, id_EdBoardGroup):
isEdBoard = None
if id_EdBoardGroup > 0:
edBoard = run_sql("""SELECT u.id
FROM user u LEFT JOIN user_usergroup ug ON u.id = ug.id_user
WHERE ug.id_usergroup = '%s' and user_status != 'A' AND user_status != 'P'""" % (id_EdBoardGroup, ))
for uid_scan in edBoard:
if uid == uid_scan[0]:
isEdBoard = 0
break
return isEdBoard
def __is_Referee (uid, id_bskBASKET):
isReferee = None
if id_bskBASKET > 0:
if basketdb.check_user_owns_baskets (uid, id_bskBASKET) == 1:
isReferee = 0
return isReferee
def __is_ProjectLeader (req, doctype, categ):
(isProjectLeader, auth_message) = acc_authorize_action(req, "projectleader",verbose=0,doctype=doctype,categ=categ)
return isProjectLeader
def __is_Author (uid, sysno):
email = Get_Field("8560_f",sysno)
email = re.sub("[\n\r ]+","",email)
uid_email = re.sub("[\n\r ]+","", acc_get_user_email(uid))
isAuthor = None
    ## Escape the user's address before using it as a regexp pattern, so that
    ## characters like "+" or "." in the email are matched literally:
    if (re.search(re.escape(uid_email), email, re.IGNORECASE) != None) and (uid_email != ""):
isAuthor = 0
return isAuthor
def __db_count_doc (doctype, categ, status, apptype):
return run_sql("SELECT COUNT(*) FROM sbmCPLXAPPROVAL WHERE doctype=%s AND categ=%s AND status=%s AND type=%s",(doctype,categ,status,apptype,))[0][0]
def __db_get_infos (key):
return run_sql("SELECT status,id_group,id_bskBASKET,id_EdBoardGroup,dFirstReq,dLastReq,dEdBoardSel,dRefereeSel,dRefereeRecom,dEdBoardRecom,dPubComRecom,dProjectLeaderAction FROM sbmCPLXAPPROVAL WHERE rn=%s and type=%s", key)
def __db_set_EdBoardSel_time (key):
run_sql("UPDATE sbmCPLXAPPROVAL SET dEdBoardSel=NOW() WHERE rn=%s and type=%s", key)
def __db_check_EdBoardGroup ((RN,apptype), id_EdBoardGroup, uid, group_descr):
res = get_group_infos (id_EdBoardGroup)
if len(res) == 0:
id_EdBoardGroup = insert_new_group (uid, RN, group_descr % RN, "VM")
run_sql("UPDATE sbmCPLXAPPROVAL SET id_EdBoardGroup=%s WHERE rn=%s and type=%s", (id_EdBoardGroup,RN,apptype,))
return id_EdBoardGroup
def __db_set_basket ((RN,apptype), id_bsk):
run_sql("UPDATE sbmCPLXAPPROVAL SET id_bskBASKET=%s, dRefereeSel=NOW() WHERE rn=%s and type=%s", (id_bsk,RN,apptype,))
def __db_set_RefereeRecom_time (key):
run_sql("UPDATE sbmCPLXAPPROVAL SET dRefereeRecom=NOW() WHERE rn=%s and type=%s", key)
def __db_set_EdBoardRecom_time (key):
run_sql("UPDATE sbmCPLXAPPROVAL SET dEdBoardRecom=NOW() WHERE rn=%s and type=%s", key)
def __db_set_PubComRecom_time (key):
run_sql("UPDATE sbmCPLXAPPROVAL SET dPubComRecom=NOW() WHERE rn=%s and type=%s", key)
def __db_set_status ((RN,apptype), status):
run_sql("UPDATE sbmCPLXAPPROVAL SET status=%s, dProjectLeaderAction=NOW() WHERE rn=%s and type=%s", (status,RN,apptype,))
def doCplxAction(req, doctype, categ, RN, apptype, action, email_user_pattern, id_user, id_user_remove, validate, id_user_val, msg_subject, msg_body, reply, commentId, ln=CFG_SITE_LANG):
"""
    Perform complex action. Note: all arguments are supposed to be washed already.
    Return the HTML body for the page.
"""
# load the right message language
_ = gettext_set_language(ln)
TEXT_RSN_RefereeSel_BASKET_DESCR = "Requests for refereeing process"
TEXT_RSN_RefereeSel_MSG_REFEREE_SUBJECT = "Referee selection"
TEXT_RSN_RefereeSel_MSG_REFEREE_BODY = "You have been named as a referee for this document :"
    TEXT_RSN_RefereeSel_MSG_GROUP_SUBJECT = "Please review this publication"
    TEXT_RSN_RefereeSel_MSG_GROUP_BODY = "Please review the following publication"
TEXT_RSN_RefereeRecom_MSG_PUBCOM_SUBJECT = "Final recommendation from the referee"
TEXT_RSN_PubComRecom_MSG_PRJLEADER_SUBJECT = "Final recommendation from the publication board : "
TEXT_RSN_ProjectLeaderDecision_MSG_SUBJECT = "Final decision from the project leader"
    TEXT_RPB_EdBoardSel_MSG_EDBOARD_SUBJECT = "You have been selected for an editorial board"
TEXT_RPB_EdBoardSel_MSG_EDBOARD_BODY = "You have been selected as a member of the editorial board of this document :"
TEXT_RPB_EdBoardSel_EDBOARD_GROUP_DESCR = "Editorial board for %s"
TEXT_RPB_RefereeSel_BASKET_DESCR = "Requests for publication"
TEXT_RPB_RefereeSel_MSG_REFEREE_SUBJECT = "Referee selection"
TEXT_RPB_RefereeSel_MSG_REFEREE_BODY = "You have been named as a referee for this document :"
    TEXT_RPB_RefereeSel_MSG_GROUP_SUBJECT = "Please review this publication"
    TEXT_RPB_RefereeSel_MSG_GROUP_BODY = "Please review the following publication"
TEXT_RPB_RefereeRecom_MSG_EDBOARD_SUBJECT = "Final recommendation from the referee"
TEXT_RPB_EdBoardRecom_MSG_PUBCOM_SUBJECT = "Final recommendation from the editorial board"
TEXT_RPB_PubComRecom_MSG_PRJLEADER_SUBJECT = "Final recommendation from the publication board"
TEXT_RPB_ProjectLeaderDecision_MSG_SUBJECT = "Final decision from the project leader"
t=""
uid = getUid(req)
if categ == "":
categ = "unknown"
key = (RN, apptype)
infos = __db_get_infos (key)
if len(infos) > 0:
(status, id_group, id_bskBASKET, id_EdBoardGroup, dummy, dummy,
dEdBoardSel, dRefereeSel, dRefereeRecom, dEdBoardRecom, dPubComRecom, dProjectLeaderAction) = infos[0]
else:
return _("Approval has never been requested for this document.") + "<br /> "
## Removing call to deprecated "getInAlice" function and replacing it with
## a call to the newer "get_brief_doc_details_from_repository" function:
## try:
## (authors,title,sysno,newrn) = getInAlice(doctype,categ,RN)
## except TypeError:
## return _("Unable to display document.")
item_details = get_brief_doc_details_from_repository(RN)
## get_brief_doc_details_from_repository has returned either None
## or a dictionary with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
if item_details is not None:
## Details of the item were found in the CDS Invenio repository
authors = ", ".join(item_details['authors'])
newrn = item_details['report-number']
title = item_details['title']
sysno = item_details['recid']
else:
## Can't find any document details.
return _("Unable to display document.")
if (action == "EdBoardSel") and (apptype == "RPB"):
if __is_PubCom (req, doctype) != 0:
return _("Action unauthorized for this document.") + "<br /> "
if status == "cancelled":
return _("Action unavailable for this document.") + "<br /> "
if validate == "go":
if dEdBoardSel == None:
__db_set_EdBoardSel_time (key)
perform_request_send (uid, "", RN, TEXT_RPB_EdBoardSel_MSG_EDBOARD_SUBJECT, TEXT_RPB_EdBoardSel_MSG_EDBOARD_BODY)
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
id_EdBoardGroup = __db_check_EdBoardGroup (key, id_EdBoardGroup, uid, TEXT_RPB_EdBoardSel_EDBOARD_GROUP_DESCR)
subtitle1 = _('Adding users to the editorial board')
# remove letters not allowed in an email
email_user_pattern = cleanstring_email(email_user_pattern)
stopon1 = ""
stopon2 = ""
stopon3 = ""
users = []
extrausers = []
# pattern is entered
if email_user_pattern:
# users with matching email-address
try:
users1 = run_sql("""SELECT id, email FROM user WHERE email<>'' AND email RLIKE %s ORDER BY email """, (email_user_pattern, ))
except OperationalError:
users1 = ()
# users that are connected
try:
users2 = run_sql("""SELECT DISTINCT u.id, u.email
FROM user u LEFT JOIN user_usergroup ug ON u.id = ug.id_user
WHERE u.email<>'' AND ug.id_usergroup = %s AND u.email RLIKE %s
ORDER BY u.email """, (id_EdBoardGroup, email_user_pattern))
except OperationalError:
users2 = ()
# no users that match the pattern
if not (users1 or users2):
stopon1 = '<p>%s</p>' % _("no qualified users, try new search.")
elif len(users1) > MAXSELECTUSERS:
stopon1 = '<p><strong>%s %s</strong>, %s (%s %s)</p>' % (len(users1), _("hits"), _("too many qualified users, specify more narrow search."), _("limit"), MAXSELECTUSERS)
# show matching users
else:
users = []
extrausers = []
for (user_id, email) in users1:
if (user_id, email) not in users2: users.append([user_id,email,''])
for (user_id, email) in users2:
extrausers.append([-user_id, email,''])
try: id_user = int(id_user)
except ValueError: pass
# user selected already connected to role
email_out = acc_get_user_email(id_user)
if id_user < 0:
stopon2 = '<p>%s</p>' % _("users in brackets are already attached to the role, try another one...")
# a user is selected
elif email_out:
result = insert_new_member(id_user, id_EdBoardGroup, "M")
stopon2 = '<p>confirm: user <strong>%s</strong> added to the editorial board.</p>' % (email_out, )
subtitle2 = _('Removing users from the editorial board')
usersremove = run_sql("""SELECT DISTINCT u.id, u.email
FROM user u LEFT JOIN user_usergroup ug ON u.id = ug.id_user
WHERE u.email <> "" AND ug.id_usergroup = %s and user_status != 'A' AND user_status != 'P'
ORDER BY u.email """, (id_EdBoardGroup, ))
try: id_user_remove = int(id_user_remove)
except ValueError: pass
# user selected already connected to role
email_out = acc_get_user_email(id_user_remove)
# a user is selected
if email_out:
result = delete_member(id_EdBoardGroup, id_user_remove)
stopon3 = '<p>confirm: user <strong>%s</strong> removed from the editorial board.</p>' % (email_out, )
t = websubmit_templates.tmpl_publiline_displaydocplxaction (
ln = ln,
doctype = doctype,
categ = categ,
rn = RN,
apptype = apptype,
action = action,
status = status,
authors = authors,
title = title,
sysno = sysno,
subtitle1 = subtitle1,
email_user_pattern = email_user_pattern,
stopon1 = stopon1,
users = users,
extrausers = extrausers,
stopon2 = stopon2,
subtitle2 = subtitle2,
usersremove = usersremove,
stopon3 = stopon3,
validate_btn = _("Validate the editorial board selection"),
)
return t
elif (action == "RefereeSel") and ((apptype == "RRP") or (apptype == "RPB")):
if apptype == "RRP":
to_check = __is_PubCom (req, doctype)
TEXT_RefereeSel_BASKET_DESCR = TEXT_RSN_RefereeSel_BASKET_DESCR
TEXT_RefereeSel_MSG_REFEREE_SUBJECT = TEXT_RSN_RefereeSel_MSG_REFEREE_SUBJECT
TEXT_RefereeSel_MSG_REFEREE_BODY = TEXT_RSN_RefereeSel_MSG_REFEREE_BODY + " " + "\"" + item_details['title'] + "\""
TEXT_RefereeSel_MSG_GROUP_SUBJECT = TEXT_RSN_RefereeSel_MSG_GROUP_SUBJECT
TEXT_RefereeSel_MSG_GROUP_BODY = TEXT_RSN_RefereeSel_MSG_GROUP_BODY + " " + "\"" + item_details['title'] + "\""
elif apptype == "RPB":
to_check = __is_EdBoard (uid, id_EdBoardGroup)
            TEXT_RefereeSel_BASKET_DESCR = TEXT_RPB_RefereeSel_BASKET_DESCR
            TEXT_RefereeSel_MSG_REFEREE_SUBJECT = TEXT_RPB_RefereeSel_MSG_REFEREE_SUBJECT
            TEXT_RefereeSel_MSG_REFEREE_BODY = TEXT_RPB_RefereeSel_MSG_REFEREE_BODY + " " + "\"" + item_details['title'] + "\""
            TEXT_RefereeSel_MSG_GROUP_SUBJECT = TEXT_RPB_RefereeSel_MSG_GROUP_SUBJECT
            TEXT_RefereeSel_MSG_GROUP_BODY = TEXT_RPB_RefereeSel_MSG_GROUP_BODY + " " + "\"" + item_details['title'] + "\""
else:
to_check = None
if to_check != 0:
return _("Action unauthorized for this document.") + "<br /> "
if status == "cancelled":
return _("Action unavailable for this document.") + "<br /> "
if validate == "go":
if dRefereeSel == None:
id_bsk = basketdb.create_basket (int(id_user_val), RN, TEXT_RefereeSel_BASKET_DESCR)
basketdb.share_basket_with_group (id_bsk, id_group, CFG_WEBBASKET_SHARE_LEVELS['ADDCMT'])
basketdb.add_to_basket (int(id_user_val), (sysno, ), (id_bsk, ))
__db_set_basket (key, id_bsk)
email_address = run_sql("""SELECT email FROM user WHERE id = %s """, (id_user_val, ))[0][0]
perform_request_send (uid, email_address, "", TEXT_RefereeSel_MSG_REFEREE_SUBJECT, TEXT_RefereeSel_MSG_REFEREE_BODY, 0, 0, 0, ln, 1)
sendMailToReferee(doctype,categ,RN,email_address,authors)
group_name = run_sql("""SELECT name FROM usergroup WHERE id = %s""", (id_group, ))[0][0]
perform_request_send (int(id_user_val), "", group_name, TEXT_RefereeSel_MSG_GROUP_SUBJECT, TEXT_RefereeSel_MSG_GROUP_BODY)
sendMailToGroup(doctype,categ,RN,id_group,authors)
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
subtitle1 = _('Referee selection')
# remove letters not allowed in an email
email_user_pattern = cleanstring_email(email_user_pattern)
stopon1 = ""
stopon2 = ""
users = []
extrausers = []
# pattern is entered
if email_user_pattern:
# users with matching email-address
try:
users1 = run_sql("""SELECT id, email FROM user WHERE email <> "" AND email RLIKE %s ORDER BY email """, (email_user_pattern, ))
except OperationalError:
users1 = ()
# no users that match the pattern
if not users1:
stopon1 = '<p>%s</p>' % _("no qualified users, try new search.")
elif len(users1) > MAXSELECTUSERS:
stopon1 = '<p><strong>%s %s</strong>, %s (%s %s)</p>' % (len(users1), _("hits"), _("too many qualified users, specify more narrow search."), _("limit"), MAXSELECTUSERS)
# show matching users
else:
users = []
for (user_id, email) in users1:
users.append([user_id,email,''])
try: id_user = int(id_user)
except ValueError: pass
# user selected already connected to role
email_out = acc_get_user_email(id_user)
# a user is selected
if email_out:
stopon2 = """<p>user <strong>%s</strong> will be the referee ?
<input type="hidden" name="id_user_val" value="%s" />
<input type="hidden" name="validate" value="go" />
<input class="adminbutton" type="submit" value="Validate the referee selection" />
</p>""" % (email_out, id_user)
t = websubmit_templates.tmpl_publiline_displaydocplxaction (
ln = ln,
doctype = doctype,
categ = categ,
rn = RN,
apptype = apptype,
action = action,
status = status,
authors = authors,
title = title,
sysno = sysno,
subtitle1 = subtitle1,
email_user_pattern = email_user_pattern,
stopon1 = stopon1,
users = users,
extrausers = [],
stopon2 = stopon2,
subtitle2 = "",
usersremove = [],
stopon3 = "",
validate_btn = "",
)
return t
elif (action == "AddAuthorList") and (apptype == "RPB"):
return ""
elif (action == "AddComment") and ((apptype == "RRP") or (apptype == "RPB")):
t = ""
if validate == "go":
(errors, infos) = perform_request_save_comment (uid, id_bskBASKET, sysno, msg_subject, msg_body, ln)
t += "%(infos)s<br /><br />" % {'infos' : infos[0]}
t += """
<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="submit" class="formbutton" value="%(button_label)s" />
</form>""" % {'doctype' : doctype,
'categ' : categ,
'rn' : RN,
'apptype' : apptype,
'button_label' : _("Come back to the document"),
}
return t
elif (action == "RefereeRecom") and ((apptype == "RRP") or (apptype == "RPB")):
if __is_Referee (uid, id_bskBASKET) != 0:
return _("Action unauthorized for this document.") + "<br /> "
if status == "cancelled":
return _("Action unavailable for this document.") + "<br /> "
if apptype == "RRP":
# Build publication committee chair's email address
user_addr = ""
# Try to retrieve the publication committee chair's email from the role database
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_%s" % (doctype,categ))):
user_addr += run_sql("""SELECT email FROM user WHERE id = %s """, (user[0], ))[0][0] + ","
        # And if there are general publication committee chairs
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_*" % doctype)):
user_addr += run_sql("""SELECT email FROM user WHERE id = %s """, (user[0], ))[0][0] + ","
user_addr = re.sub(",$","",user_addr)
group_addr = ""
TEXT_RefereeRecom_MSG_SUBJECT = TEXT_RSN_RefereeRecom_MSG_PUBCOM_SUBJECT
elif apptype == "RPB":
user_addr = ""
group_addr = RN
TEXT_RefereeRecom_MSG_SUBJECT = TEXT_RPB_RefereeRecom_MSG_EDBOARD_SUBJECT
else:
user_addr = ""
group_addr = ""
TEXT_RefereeRecom_MSG_SUBJECT = ""
if validate == "approve" or validate == "reject":
if dRefereeRecom == None:
perform_request_send (uid, user_addr, group_addr, msg_subject, msg_body, 0, 0, 0, ln, 1)
if validate == "approve":
msg_body = "Approved : " + msg_body
else:
msg_body = "Rejected : " + msg_body
#Get the Project Leader's email address
# email = ""
# for user in acc_get_role_users(acc_get_role_id("projectleader_%s_%s" % (doctype,categ))):
# email += run_sql("""SELECT email FROM user WHERE id = %s """, (user[0], ))[0][0] + ","
# sendMailToProjectLeader(doctype, categ, RN, email, authors, "referee", msg_body)
sendMailtoCommitteeChair(doctype, categ, RN, user_addr, authors)
__db_set_RefereeRecom_time (key)
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
t = websubmit_templates.tmpl_publiline_displaycplxrecom (
ln = ln,
doctype = doctype,
categ = categ,
rn = RN,
apptype = apptype,
action = action,
status = status,
authors = authors,
title = title,
sysno = sysno,
msg_to = user_addr,
msg_to_group = group_addr,
msg_subject = TEXT_RefereeRecom_MSG_SUBJECT,
)
return t
elif (action == "EdBoardRecom") and (apptype == "RPB"):
if __is_EdBoard (uid, id_EdBoardGroup) != 0:
return _("Action unauthorized for this document.") + "<br /> "
if status == "cancelled":
return _("Action unavailable for this document.") + "<br /> "
# Build publication committee chair's email address
user_addr = ""
# Try to retrieve the publication committee chair's email from the role database
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_%s" % (doctype,categ))):
user_addr += run_sql("""SELECT nickname FROM user WHERE id = %s """, (user[0], ))[0][0] + ","
        # And if there are general publication committee chairs
for user in acc_get_role_users(acc_get_role_id("pubcomchair_%s_*" % doctype)):
user_addr += run_sql("""SELECT nickname FROM user WHERE id = %s """, (user[0], ))[0][0] + ","
user_addr = re.sub(",$","",user_addr)
if validate == "go":
if dEdBoardRecom == None:
perform_request_send (uid, user_addr, "", msg_subject, msg_body)
__db_set_EdBoardRecom_time (key)
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
t = websubmit_templates.tmpl_publiline_displaycplxrecom (
ln = ln,
doctype = doctype,
categ = categ,
rn = RN,
apptype = apptype,
action = action,
status = status,
authors = authors,
title = title,
sysno = sysno,
msg_to = user_addr,
msg_to_group = "",
msg_subject = TEXT_RPB_EdBoardRecom_MSG_PUBCOM_SUBJECT,
)
return t
elif (action == "PubComRecom") and ((apptype == "RRP") or (apptype == "RPB")):
if __is_PubCom (req, doctype) != 0:
return _("Action unauthorized for this document.") + "<br /> "
if status == "cancelled":
return _("Action unavailable for this document.") + "<br /> "
# Build project leader's email address
user_addr = ""
# Try to retrieve the project leader's email from the role database
for user in acc_get_role_users(acc_get_role_id("projectleader_%s_%s" % (doctype,categ))):
user_addr += run_sql("""SELECT email FROM user WHERE id = %s """, (user[0], ))[0][0] + ","
        # And if there are general project leaders
for user in acc_get_role_users(acc_get_role_id("projectleader_%s_*" % doctype)):
user_addr += run_sql("""SELECT email FROM user WHERE id = %s """, (user[0], ))[0][0] + ","
user_addr = re.sub(",$","",user_addr)
if apptype == "RRP":
TEXT_PubComRecom_MSG_SUBJECT = TEXT_RSN_PubComRecom_MSG_PRJLEADER_SUBJECT
elif apptype == "RPB":
group_addr = RN
TEXT_PubComRecom_MSG_SUBJECT = TEXT_RPB_PubComRecom_MSG_PRJLEADER_SUBJECT
else:
TEXT_PubComRecom_MSG_SUBJECT = ""
if validate == "approve" or validate == "reject":
if validate == "approve":
msg_body = "Approved : " + msg_body
else:
msg_body = "Rejected : " + msg_body
if dPubComRecom == None:
perform_request_send (uid, user_addr, "", msg_subject, msg_body, 0, 0, 0, ln, 1)
sendMailToProjectLeader(doctype, categ, RN, user_addr, authors, "publication committee chair", msg_body)
__db_set_PubComRecom_time (key)
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
t = websubmit_templates.tmpl_publiline_displaycplxrecom (
ln = ln,
doctype = doctype,
categ = categ,
rn = RN,
apptype = apptype,
action = action,
status = status,
authors = authors,
title = title,
sysno = sysno,
msg_to = user_addr,
msg_to_group = "",
msg_subject = TEXT_PubComRecom_MSG_SUBJECT + " " + "\"" + item_details['title'] + "\"",
)
return t
elif (action == "ProjectLeaderDecision") and ((apptype == "RRP") or (apptype == "RPB")):
if __is_ProjectLeader (req, doctype, categ) != 0:
return _("Action unauthorized for this document.") + "<br /> "
if status == "cancelled":
return _("Action unavailable for this document.") + "<br /> "
t += """
<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="submit" class="formbutton" value="%(button_label)s" />
</form>""" % {'doctype' : doctype,
'categ' : categ,
'rn' : RN,
'apptype' : apptype,
'button_label' : _("Back to the document"),
}
if validate == "approve":
if dProjectLeaderAction == None:
(errors, infos) = perform_request_save_comment (uid, id_bskBASKET, sysno, msg_subject, msg_body, ln)
out = "%(infos)s<br /><br />" % {'infos' : infos[0]}
sendMailToSubmitter(doctype, categ, RN, "approved")
__db_set_status (key, 'approved')
return out + t
elif validate == "reject":
if dProjectLeaderAction == None:
(errors, infos) = perform_request_save_comment (uid, id_bskBASKET, sysno, msg_subject, msg_body, ln)
out = "%(infos)s<br /><br />" % {'infos' : infos[0]}
sendMailToSubmitter(doctype, categ, RN, "rejected")
__db_set_status (key, 'rejected')
return out + t
validation = """
<select name="validate">
<option> %(select)s</option>
<option value="approve">%(approve)s</option>
<option value="reject">%(reject)s</option>
</select>
<input type="submit" class="formbutton" value="%(button_label)s" />""" % {'select' : _('Select:'),
'approve' : _('Approve'),
'reject' : _('Reject'),
'button_label' : _('Take a decision'),
}
if apptype == "RRP":
TEXT_ProjectLeaderDecision_MSG_SUBJECT = TEXT_RSN_ProjectLeaderDecision_MSG_SUBJECT
elif apptype == "RPB":
TEXT_ProjectLeaderDecision_MSG_SUBJECT = TEXT_RPB_ProjectLeaderDecision_MSG_SUBJECT
else:
TEXT_ProjectLeaderDecision_MSG_SUBJECT = ""
t = websubmit_templates.tmpl_publiline_displaywritecomment(doctype, categ, RN, apptype, action, _("Take a decision"), TEXT_ProjectLeaderDecision_MSG_SUBJECT, validation, "", ln)
return t
elif (action == "ProjectLeaderDecision") and (apptype == "RDA"):
if __is_ProjectLeader (req, doctype, categ) != 0:
return _("Action unauthorized for this document.") + "<br /> "
if status == "cancelled":
return _("Action unavailable for this document.") + "<br /> "
if validate == "approve":
if dProjectLeaderAction == None:
__db_set_status (key, 'approved')
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
elif validate == "reject":
if dProjectLeaderAction == None:
__db_set_status (key, 'rejected')
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
t = """<p>
<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="validate" value="approve" />
<input class="adminbutton" type="submit" value="%(approve)s" />
</form>
<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="validate" value="reject" />
<input class="adminbutton" type="submit" value="%(reject)s" />
</form>
</p>""" % {
'rn' : RN,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'action' : action,
'approve' : _('Approve'),
'reject' : _('Reject'),
}
return t
elif (action == "AuthorCancel") and ((apptype == "RRP") or (apptype == "RPB") or (apptype == "RDA")):
if __is_Author (uid, sysno) != 0:
return _("Action unauthorized for this document.") + "<br /> "
if (status == "cancelled") or (dProjectLeaderAction != None):
return _("Action unavailable for this document.") + "<br /> "
if validate == "go":
__db_set_status (key, 'cancelled')
return displayCplxDocument(req, doctype,categ,RN,apptype, reply, commentId, ln)
t = """<p>
<form action="publiline.py">
<input type="hidden" name="flow" value="cplx" />
<input type="hidden" name="doctype" value="%(doctype)s" />
<input type="hidden" name="categ" value="%(categ)s" />
<input type="hidden" name="RN" value="%(rn)s" />
<input type="hidden" name="apptype" value="%(apptype)s" />
<input type="hidden" name="action" value="%(action)s" />
<input type="hidden" name="validate" value="go" />
<input class="adminbutton" type="submit" value="%(cancel)s" />
</form>
</p>""" % {
'rn' : RN,
'categ' : categ,
'doctype' : doctype,
'apptype' : apptype,
'action' : action,
'cancel' : _('Cancel'),
}
return t
else:
return _("Wrong action for this document.") + "<br /> "
return t
def get_pending_item_details(doctype, reportnumber):
"""Given a doctype and reference number, try to retrieve an item's details.
The first place to search for them should be the WebSubmit pending
       directory. If nothing is retrieved from there, an attempt is made
to retrieve them from the CDS Invenio repository itself.
@param doctype: (string) - the doctype of the item for which brief
details are to be retrieved.
@param reportnumber: (string) - the report number of the item
for which details are to be retrieved.
@return: (dictionary or None) - If details are found for the item,
they will be returned in a dictionary structured as follows:
{ 'title' : '-', ## String - the item's title
'recid' : '', ## String - recid taken from the SN file
'report-number' : '', ## String - the item's report number
'authors' : [], ## List - the item's authors
}
If no details were found a NoneType is returned.
"""
## First try to get the details of a document from the pending dir:
item_details = get_brief_doc_details_from_pending(doctype, \
reportnumber)
if item_details is None:
item_details = get_brief_doc_details_from_repository(reportnumber)
## Return the item details:
return item_details
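## Minimal usage sketch (hypothetical doctype and report number, not part of
## the original module):
##   details = get_pending_item_details("DEMOTEST", "DEMO-TEST-2008-001")
##   if details is not None:
##       print details['title'], ", ".join(details['authors'])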
def get_brief_doc_details_from_pending(doctype, reportnumber):
"""Try to get some brief details about the submission that is awaiting
the referee's decision.
Details sought are:
+ title
+ Authors
+ recid (why?)
+ report-number (why?)
This function searches for a MARC XML record in the pending submission's
working directory. It prefers the so-called 'dummy' record, but will
search for the final MARC XML record that would usually be passed to
bibupload (i.e. recmysql) if that is not present. If neither of these
records are present, no details will be found.
@param doctype: (string) - the WebSubmit document type of the item
to be refereed. It is used in order to locate the submission's
working directory in the WebSubmit pending directory.
@param reportnumber: (string) - the report number of the item for
which details are to be recovered. It is used in order to locate the
submission's working directory in the WebSubmit pending directory.
@return: (dictionary or None) - If details are found for the item,
they will be returned in a dictionary structured as follows:
{ 'title' : '-', ## String - the item's title
'recid' : '', ## String - recid taken from the SN file
'report-number' : '', ## String - the item's report number
'authors' : [], ## List - the item's authors
}
If no details were found (i.e. no MARC XML files in the submission's
working directory), a NoneType is returned.
"""
pending_doc_details = None
marcxml_rec_name = None
## Check for a MARC XML record in the pending dir.
## If it's there, we will use it to obtain certain bibliographic
## information such as title, author(s), etc, which we will then
## display to the referee.
## We favour the "dummy" record (created with the WebSubmit function
## "Make_Dummy_MARC_XML_Record"), because it was made for this
## purpose. If it's not there though, we'll take the normal
## (final) recmysql record that would generally be passed to bibupload.
if os.access("%s/%s/%s/%s" % (CFG_WEBSUBMIT_PENDING_DIR, \
doctype, \
reportnumber, \
CFG_WEBSUBMIT_DUMMY_MARC_XML_REC), \
os.F_OK|os.R_OK):
## Found the "dummy" marc xml record in the submission dir.
## Use it:
marcxml_rec_name = CFG_WEBSUBMIT_DUMMY_MARC_XML_REC
elif os.access("%s/%s/%s/%s" % (CFG_WEBSUBMIT_PENDING_DIR, \
doctype, \
reportnumber, \
CFG_WEBSUBMIT_MARC_XML_REC), \
os.F_OK|os.R_OK):
## Although we didn't find the "dummy" marc xml record in the
## submission dir, we did find the "real" one (that which would
## normally be passed to bibupload). Use it:
marcxml_rec_name = CFG_WEBSUBMIT_MARC_XML_REC
## If we have a MARC XML record in the pending submission's
## working directory, go ahead and use it:
if marcxml_rec_name is not None:
try:
fh_marcxml_record = open("%s/%s/%s/%s" \
% (CFG_WEBSUBMIT_PENDING_DIR, \
doctype, \
reportnumber, \
marcxml_rec_name), "r")
xmltext = fh_marcxml_record.read()
fh_marcxml_record.close()
except IOError:
## Unfortunately, it wasn't possible to read the details of the
## MARC XML record. Register the exception.
exception_prefix = "Error: Publiline was unable to read the " \
"MARC XML record [%s/%s/%s/%s] when trying to " \
"use it to recover details about a pending " \
"submission." % (CFG_WEBSUBMIT_PENDING_DIR, \
doctype, \
reportnumber, \
marcxml_rec_name)
register_exception(prefix=exception_prefix)
else:
## Attempt to use bibrecord to create an internal representation
## of the record, from which we can extract certain bibliographic
## information:
records = create_records(xmltext, 1, 1)
try:
record = records[0][0]
if record is None:
raise ValueError
except (IndexError, ValueError):
## Bibrecord couldn't successfully represent the record
## contained in the xmltext string. The record must have
## been empty or badly formed (or something).
pass
else:
## Dictionary to hold the interesting details of the
## pending item:
pending_doc_details = { 'title' : '-',
'recid' : '',
'report-number' : '',
'authors' : [],
}
## Get the recid:
## Note - the old "getInPending" function reads the "SN"
## file from the submission's working directory and since
## the "SN" file is currently "magic" and hardcoded
## throughout WebSubmit, I'm going to stick to this model.
## I could, however, have tried to get it from the MARC XML
## record as so:
## recid = record_get_field_value(rec=record, tag="001")
try:
fh_recid = open("%s/%s/%s/SN" \
% (CFG_WEBSUBMIT_PENDING_DIR, \
doctype, \
reportnumber), "r")
recid = fh_recid.read()
fh_recid.close()
except IOError:
## Probably, there was no "SN" file in the submission's
## working directory.
pending_doc_details['recid'] = ""
else:
pending_doc_details['recid'] = recid.strip()
## Item report number (from record):
## Note: I don't know what purpose this serves. It appears
## to be used in the email that is sent to the author, but
## it seems funny to me, since we already have the report
## number (which is indeed used to find the submission's
## working directory in pending). Perhaps it's used for
## cases when the reportnumber is changed after approval?
## To investigate when time allows:
finalrn = record_get_field_value(rec=record, \
tag="037", \
code="a")
if finalrn != "":
pending_doc_details['report-number'] = finalrn
## Item title:
title = record_get_field_value(rec=record, \
tag="245", \
code="a")
if title != "":
pending_doc_details['title'] = title
else:
## Alternative title:
alt_title = record_get_field_value(rec=record, \
tag="246", \
ind1="1", \
code="a")
if alt_title != "":
pending_doc_details['title'] = alt_title
## Item first author:
first_author = record_get_field_value(rec=record, \
tag="100", \
code="a")
if first_author != "":
pending_doc_details['authors'].append(first_author)
## Other Authors:
other_authors = record_get_field_values(rec=record, \
tag="700", \
code="a")
for author in other_authors:
pending_doc_details['authors'].append(author)
## Return the details discovered about the pending document:
return pending_doc_details
def get_brief_doc_details_from_repository(reportnumber):
"""Try to get some brief details about the submission that is awaiting
the referee's decision.
Details sought are:
+ title
+ Authors
+ recid (why?)
+ report-number (why?)
+ email
This function searches in the CDS Invenio repository, based on
"reportnumber" for a record and then pulls the interesting fields
from it.
@param reportnumber: (string) - the report number of the item for
which details are to be recovered. It is used in the search.
@return: (dictionary or None) - If details are found for the item,
they will be returned in a dictionary structured as follows:
{ 'title' : '-', ## String - the item's title
'recid' : '', ## String - recid taken from the SN file
'report-number' : '', ## String - the item's report number
'authors' : [], ## List - the item's authors
}
If no details were found a NoneType is returned.
"""
## Details of the pending document, as found in the repository:
pending_doc_details = None
## Search for records matching this "report number"
found_record_ids = list(search_pattern(req=None, \
p=reportnumber, \
f="reportnumber", \
m="e"))
## How many records were found?
if len(found_record_ids) == 1:
## Found only 1 record. Get the fields of interest:
pending_doc_details = { 'title' : '-',
'recid' : '',
'report-number' : '',
'authors' : [],
'email' : '',
}
recid = found_record_ids[0]
## Authors:
first_author = get_fieldvalues(recid, "100__a")
for author in first_author:
pending_doc_details['authors'].append(author)
other_authors = get_fieldvalues(recid, "700__a")
for author in other_authors:
pending_doc_details['authors'].append(author)
## Title:
title = get_fieldvalues(recid, "245__a")
if len(title) > 0:
pending_doc_details['title'] = title[0]
else:
## There was no value for title - check for an alternative title:
alt_title = get_fieldvalues(recid, "2641_a")
if len(alt_title) > 0:
pending_doc_details['title'] = alt_title[0]
## Record ID:
pending_doc_details['recid'] = recid
## Report Number:
reptnum = get_fieldvalues(recid, "037__a")
if len(reptnum) > 0:
pending_doc_details['report-number'] = reptnum[0]
## Email:
email = get_fieldvalues(recid, "859__f")
if len(email) > 0:
pending_doc_details['email'] = email[0]
elif len(found_record_ids) > 1:
        ## Oops. This is unexpected - there shouldn't be multiple matches
## for this item. The old "getInAlice" function would have simply
## taken the first record in the list. That's not very nice though.
## Some kind of warning or error should be raised here. FIXME.
pass
return pending_doc_details
# Retrieve info about document
def getInfo(doctype,categ,RN):
"""FIXME: DEPRECATED!"""
result = getInPending(doctype,categ,RN)
if not result:
result = getInAlice(doctype,categ,RN)
return result
#seek info in pending directory
def getInPending(doctype,categ,RN):
"""FIXME: DEPRECATED!"""
PENDIR="%s/pending" % CFG_WEBSUBMIT_STORAGEDIR
if os.path.exists("%s/%s/%s/AU" % (PENDIR,doctype,RN)):
fp = open("%s/%s/%s/AU" % (PENDIR,doctype,RN),"r")
authors=fp.read()
fp.close()
else:
authors = ""
if os.path.exists("%s/%s/%s/TI" % (PENDIR,doctype,RN)):
fp = open("%s/%s/%s/TI" % (PENDIR,doctype,RN),"r")
title=fp.read()
fp.close()
else:
title = ""
if os.path.exists("%s/%s/%s/SN" % (PENDIR,doctype,RN)):
fp = open("%s/%s/%s/SN" % (PENDIR,doctype,RN),"r")
sysno=fp.read()
fp.close()
else:
sysno = ""
if title == "" and os.path.exists("%s/%s/%s/TIF" % (PENDIR,doctype,RN)):
fp = open("%s/%s/%s/TIF" % (PENDIR,doctype,RN),"r")
title=fp.read()
fp.close()
if title == "":
return 0
else:
return (authors,title,sysno,"")
#seek info in Alice database
def getInAlice(doctype,categ,RN):
"""FIXME: DEPRECATED!"""
# initialize sysno variable
sysno = ""
searchresults = list(search_pattern(req=None, p=RN, f="reportnumber"))
if len(searchresults) == 0:
return 0
sysno = searchresults[0]
if sysno != "":
title = Get_Field('245__a',sysno)
emailvalue = Get_Field('8560_f',sysno)
authors = Get_Field('100__a',sysno)
authors += "\n%s" % Get_Field('700__a',sysno)
newrn = Get_Field('037__a',sysno)
return (authors,title,sysno,newrn)
else:
return 0
def SendEnglish(doctype,categ,RN,title,authors,access,sysno):
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
# retrieve useful information from webSubmit configuration
res = run_sql("select value from sbmPARAMETERS where name='categformatDAM' and doctype=%s", (doctype,))
categformat = res[0][0]
categformat = re.sub("<CATEG>","([^-]*)",categformat)
categs = re.match(categformat,RN)
if categs is not None:
categ = categs.group(1)
else:
categ = "unknown"
res = run_sql("select value from sbmPARAMETERS where name='addressesDAM' and doctype=%s",(doctype,))
if len(res) > 0:
otheraddresses = res[0][0]
otheraddresses = otheraddresses.replace("<CATEG>",categ)
else:
otheraddresses = ""
# Build referee's email address
refereeaddress = ""
# Try to retrieve the referee's email from the referee's database
for user in acc_get_role_users(acc_get_role_id("referee_%s_%s" % (doctype,categ))):
refereeaddress += user[1] + ","
# And if there are general referees
for user in acc_get_role_users(acc_get_role_id("referee_%s_*" % doctype)):
refereeaddress += user[1] + ","
refereeaddress = re.sub(",$","",refereeaddress)
# Creation of the mail for the referee
addresses = ""
if refereeaddress != "":
addresses = refereeaddress + ","
if otheraddresses != "":
addresses += otheraddresses
else:
addresses = re.sub(",$","",addresses)
if addresses=="":
SendWarning(doctype,categ,RN,title,authors,access)
return 0
if authors == "":
authors = "-"
res = run_sql("select value from sbmPARAMETERS where name='directory' and doctype=%s", (doctype,))
directory = res[0][0]
message = """
The document %s has been published as a Communication.
Your approval is requested for it to become an official Note.
Title: %s
Author(s): %s
To access the document(s), select the file(s) from the location:
<%s/record/%s/files/>
To approve/reject the document, you should go to this URL:
<%s/approve.py?%s>
---------------------------------------------
Best regards.
The submission team.""" % (RN,title,authors,CFG_SITE_URL,sysno,CFG_SITE_URL,access)
# send the mail
send_email(FROMADDR,addresses,"Request for Approval of %s" % RN, message,footer="")
return ""
def SendWarning(doctype,categ,RN,title,authors,access):
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
message = "Failed sending approval email request for %s" % RN
# send the mail
send_email(FROMADDR,CFG_SITE_ADMIN_EMAIL,"Failed sending approval email request",message)
return ""
def errorMsg(title,req,c=CFG_SITE_NAME,ln=CFG_SITE_LANG):
return page(title="error",
body = create_error_box(req, title=title,verbose=0, ln=ln),
description="%s - Internal Error" % c,
keywords="%s, Internal Error" % c,
uid = getUid(req),
language=ln,
req=req,
navmenuid='yourapprovals')
def warningMsg(title,req,c=CFG_SITE_NAME,ln=CFG_SITE_LANG):
return page(title="warning",
body = title,
description="%s - Internal Error" % c,
keywords="%s, Internal Error" % c,
uid = getUid(req),
language=ln,
req=req,
navmenuid='yourapprovals')
def sendMailToReferee(doctype,categ,RN,email,authors):
item_details = get_brief_doc_details_from_repository(RN)
## get_brief_doc_details_from_repository has returned either None
## or a dictionary with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
message = """
Scientific Note approval for document %s has been submitted to the CERN Document Server.
Your recommendation is requested on it.
Requested subcategory: %s
Title: %s
Author(s): %s
To access the document(s), select the file(s) from the location:
<%s/record/%s>
To make a recommendation, you should go to this URL:
<%s>
You can also check the status of the document:
<%s>
---------------------------------------------
Best regards.
The submission team.""" % (str(RN),
str(categ),
str(item_details['title']),
authors,
CFG_SITE_URL,
str(item_details['recid']),
str(CFG_SITE_URL + "/publiline.py?flow=cplx&doctype="+doctype+"&ln=en&apptype=RRP&categ="+categ+"&RN="+RN+"&action=RefereeRecom"),
str(CFG_SITE_URL + "/publiline.py?flow=cplx&doctype="+doctype+"&ln=en&apptype=RRP&categ="+categ+"&RN="+RN))
# send the mail
send_email(FROMADDR, email,"Request for document %s recommendation" % (RN),message)
return ""
def sendMailToGroup(doctype,categ,RN,group_id,authors):
item_details = get_brief_doc_details_from_repository(RN)
## get_brief_doc_details_from_repository has returned either None
## or a dictionary with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
message = """
Scientific Note approval for document %s has been submitted to the CERN Document Server.
Your comments are requested on this document.
Requested subcategory: %s
Title: %s
Author(s): %s
To access the document(s), select the file(s) from the location:
<%s/record/%s>
To leave a comment or check the status of the approval process, you should go to this URL:
<%s>
""" % (str(RN),
str(categ),
str(item_details['title']),
authors,
CFG_SITE_URL,
str(item_details['recid']),
str(CFG_SITE_URL + "/publiline.py?flow=cplx&doctype="+doctype+"&ln=en&apptype=RRP&categ="+categ+"&RN="+RN))
    # send mails to all members of the group
    ## Use bound query parameters rather than string interpolation:
    group_member_ids = run_sql("SELECT id_user FROM user_usergroup WHERE id_usergroup = %s", (group_id, ))
    for member_id in group_member_ids:
        member_email = run_sql("SELECT email FROM user WHERE id = %s", (member_id[0], ))
if not member_email[0][0] == "[email protected]":
send_email(FROMADDR, member_email[0][0],"Request for comment on document %s" % (RN),message)
return ""
def sendMailToProjectLeader(doctype, categ, RN, email, authors, actor, recommendation):
item_details = get_brief_doc_details_from_repository(RN)
## get_brief_doc_details_from_repository has returned either None
## or a dictionary with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
message = """
Scientific Note approval for document %s has been submitted to the CERN Document Server.
Your approval is requested for this document. Once you have received recommendations from both the referee and the publication committee chair, you will be able to make your decision.
Requested subcategory: %s
Title: %s
Author(s): %s
To access the document(s), select the file(s) from the location:
<%s/record/%s>
The %s has made a recommendation for the document. He/she said the following:
%s
You can approve this document by visiting this page:
<%s>
You can also check the status of the document from:
<%s>
""" % (str(RN),
str(categ),
str(item_details['title']),
authors,
CFG_SITE_URL,
str(item_details['recid']),
actor,
recommendation,
str(CFG_SITE_URL + "/publiline.py?flow=cplx&doctype="+doctype+"&ln=en&apptype=RRP&categ="+categ+"&RN="+RN+"&action=ProjectLeaderDecision"),
str(CFG_SITE_URL + "/publiline.py?flow=cplx&doctype="+doctype+"&ln=en&apptype=RRP&categ="+categ+"&RN="+RN))
    # send the mail to the project leader
send_email(FROMADDR, email,"Request for approval/rejection of document %s" % (RN),message)
return ""
def sendMailToSubmitter(doctype, categ, RN, outcome):
item_details = get_brief_doc_details_from_repository(RN)
## get_brief_doc_details_from_repository has returned either None
## or a dictionary with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
message = """
The approval process for your document : %s, has been completed. The details of this document are as follows:
Requested subcategory: %s
Title: %s
The project leader has made the following recommendation for the document:
%s
""" % (RN, categ, item_details['title'], outcome)
    # send the mail to the submitter
send_email(FROMADDR, item_details['email'],"Final outcome for approval of document : %s" % (RN),message)
return ""
def sendMailtoCommitteeChair(doctype, categ, RN, email, authors):
item_details = get_brief_doc_details_from_repository(RN)
## get_brief_doc_details_from_repository has returned either None
## or a dictionary with the following structure:
## { 'title' : '-', ## String - the item's title
## 'recid' : '', ## String - recid
## 'report-number' : '', ## String - the item's report number
## 'authors' : [], ## List - the item's authors
## }
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
message = """
The referee assigned to the document detailed below has made a recommendation. You are now requested to make a recommendation of your own.
Requested subcategory: %s
Title: %s
Author(s): %s
To access the document(s), select the file(s) from the location:
<%s/record/%s>
You can make a recommendation by visiting this page:
<%s>
""" % (str(categ),
str(item_details['title']),
authors,
CFG_SITE_URL,
str(item_details['recid']),
str(CFG_SITE_URL + "/publiline.py?flow=cplx&doctype="+doctype+"&ln=en&apptype=RRP&categ="+categ+"&RN="+RN))
    # send the mail to the committee chair
send_email(FROMADDR, email,"Request for reccommendation of document %s" % (RN),message)
|
drmrd/ansible | refs/heads/devel | lib/ansible/plugins/terminal/asa.py | 52 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"error:", re.I),
re.compile(br"Removing.* not allowed, it is being used")
]
def on_open_shell(self):
if self._get_prompt().strip().endswith(b'#'):
self.disable_pager()
    def disable_pager(self):
        try:
            self._exec_cli_command(u'no terminal pager')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to disable terminal pager')
def on_become(self, passwd=None):
if self._get_prompt().strip().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text on both py2 and py3.
cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
self.disable_pager()
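# Illustrative note (the example prompts below are assumptions, not taken
# from this file): the stdout regexes are meant to match ASA prompts such as
# "ciscoasa>" (user EXEC), "ciscoasa#" (privileged EXEC) and
# "ciscoasa(config)#" (configuration mode). For instance,
# terminal_stdout_re[0].search(b"ciscoasa(config)# ") matches, telling the
# connection plugin that the device is ready for the next command.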
|
xen0l/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/ec2_customer_gateway_facts.py | 33 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ec2_customer_gateway_facts
short_description: Gather facts about customer gateways in AWS
description:
- Gather facts about customer gateways in AWS
version_added: "2.5"
requirements: [ boto3 ]
author: Madhura Naniwadekar (@Madhura-CSI)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeCustomerGateways.html) for possible filters.
customer_gateway_ids:
description:
- Get details of a specific customer gateways using customer gateway ID/IDs. This value should be provided as a list.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather facts about all customer gateways
ec2_customer_gateway_facts:
- name: Gather facts about a filtered list of customer gateways, based on tags
ec2_customer_gateway_facts:
region: ap-southeast-2
filters:
"tag:Name": test-customer-gateway
"tag:AltName": test-customer-gateway-alt
register: cust_gw_facts
- name: Gather facts about a specific customer gateway by specifying customer gateway ID
ec2_customer_gateway_facts:
region: ap-southeast-2
customer_gateway_ids:
- 'cgw-48841a09'
- 'cgw-fec021ce'
register: cust_gw_facts
'''
RETURN = '''
customer_gateways:
description: List of one or more customer gateways.
returned: always
type: list
sample: [
{
"bgp_asn": "65000",
"customer_gateway_id": "cgw-fec844ce",
"customer_gateway_name": "test-customer-gw",
"ip_address": "110.112.113.120",
"state": "available",
"tags": [
{
"key": "Name",
"value": "test-customer-gw"
}
],
"type": "ipsec.1"
}
]
'''
import json
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def list_customer_gateways(connection, module):
params = dict()
params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
params['CustomerGatewayIds'] = module.params.get('customer_gateway_ids')
try:
result = json.loads(json.dumps(connection.describe_customer_gateways(**params), default=date_handler))
except (ClientError, BotoCoreError) as e:
module.fail_json_aws(e, msg="Could not describe customer gateways")
snaked_customer_gateways = [camel_dict_to_snake_dict(gateway) for gateway in result['CustomerGateways']]
if snaked_customer_gateways:
for customer_gateway in snaked_customer_gateways:
customer_gateway['tags'] = boto3_tag_list_to_ansible_dict(customer_gateway.get('tags', []))
customer_gateway_name = customer_gateway['tags'].get('Name')
if customer_gateway_name:
customer_gateway['customer_gateway_name'] = customer_gateway_name
module.exit_json(changed=False, customer_gateways=snaked_customer_gateways)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
customer_gateway_ids=dict(default=[], type='list'),
filters=dict(default={}, type='dict')
)
)
module = AnsibleAWSModule(argument_spec=argument_spec,
mutually_exclusive=[['customer_gateway_ids', 'filters']],
supports_check_mode=True)
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
list_customer_gateways(connection, module)
if __name__ == '__main__':
main()
|
mic4ael/indico | refs/heads/master | indico/migrations/versions/20180423_1602_66ecbb1c0ddd_add_natsort_function.py | 7 | """Add natsort function
Revision ID: 66ecbb1c0ddd
Revises: 813ea74ce8dc
Create Date: 2018-04-23 16:02:35.682560
"""
from alembic import op
from indico.core.db.sqlalchemy.custom.natsort import SQL_FUNCTION_NATSORT
# revision identifiers, used by Alembic.
revision = '66ecbb1c0ddd'
down_revision = '813ea74ce8dc'
branch_labels = None
depends_on = None
def upgrade():
op.execute(SQL_FUNCTION_NATSORT)
def downgrade():
op.execute('DROP FUNCTION indico.natsort(value TEXT)')
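# Illustrative note (hypothetical table/column names, not part of this
# migration): natsort is intended as a sort key so that strings with embedded
# numbers order naturally, e.g. 'Room 2' before 'Room 10':
#
#   SELECT name FROM rooms ORDER BY indico.natsort(name);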
|
adityamadhu20/Calculator | refs/heads/master | calculator_as_plugin/lib/calculator_as_plugin/utils/__init__.py | 1 | #
from .testcase import TestCase, main
|
jpaugh/braille-converter | refs/heads/master | braille/tests/__init__.py | 2 | # Copyright 2012 Jonathan Paugh
# See COPYING for license details
from braille import convert, dots, util
from braille.options import opt
import unittest
class BrTestCase(unittest.TestCase):
'''
Super-test class. All tests of the actual text translated should be
  based on this class. Subclasses override the pairs() method to return
  an iterable of pairs in the form: (input, expected_output).
In the midst of the pairs, any strings encountered will be printed.
This allows headers to be printed for the tests, i.e.:
pairs = [
'First tests',
('test 1', '...'),
...
There are three methods h, sh, and ss which generate headers,
subheaders, and subsubheaders specifically for tests from the book.
'''
def setUp(self):
self.pairs = self.pairs()
def pairs(self):
'''
return the list of pairs for the test
'''
return ()
def test_rule(self):
for pair in self.pairs:
if type(pair) == str:
util.log.info(pair)
else:
(prn, brl) = pair
util.log.debug(' p <<%s>>' % prn)
util.log.debug(' e << %s >>' % brl.replace('', ' '))
util.log.debug(' g << %s >>' % convert(prn).replace('', ' '))
self.assertEqual(brl, convert(prn))
class EBAETestCase(BrTestCase):
'''
Superclass for test cases that document our support for the rules from
the English Braille: American Edition book. There are several methods
defined here to generate headers for these tests, that show which rule
of the book is being tested. These are h(), sh(), and ss(). These
methods return strings which should be interspersed within the test
pairs, like so:
pairs = [
self.h('Punctuation'),
(',', dots('2')),
...
self.sh(''),
self.sh('Quotation Marks'),
('"', dots('236')),
...
...
'''
#Formatting strings
header = 'Rule %s: %s'
subheader = ' %s.%d: %s'
ssheader = ' %s.%d.%s: %s'
#Formatting functions
def h(self, msg):
'''
    Return the header for this rule. (Should be called only once.)
'''
if not getattr(self, 'rule', None):
raise TypeError, 'self.rule must be specified'
self._rule = self.rule
return self.header % (self._rule, msg)
def sh(self, msg, go=None):
'''
Return the next subheader. It automatically increments the subrule
number each time it is called (and resets the subsubheader number).
    However, if you skip some subrules, set the go argument to the number
of the next header to be printed.
'''
if go:
self._sh = int(go)
else:
self._sh += 1
self._ss = 0
return self.subheader % (self._rule, self._sh, msg)
def ss(self, msg, go=None):
'''
Return the next subsubheader. It automatically increments the
subsubrule number each time it is called. However, you can manually
    set the number via the go argument, in case you skip some tests.
'''
if go:
self._ss = str(go)
else:
if self._ss:
        self._ss = chr(ord(self._ss) + 1)
else:
self._ss = 'a'
return self.ssheader % (self._rule, self._sh, self._ss, msg)
|
MahdiZareie/VersionMonitoring | refs/heads/master | project/migrations/0004_auto_20160417_1737.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-17 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0003_auto_20160417_1735'),
]
operations = [
migrations.AlterField(
model_name='version',
name='link',
field=models.URLField(blank=True, null=True),
),
]
|
sunils34/buffer-django-nonrel | refs/heads/master | tests/modeltests/m2m_through/models.py | 91 | from django.db import models
from datetime import datetime
# M2M described on one of the models
class Person(models.Model):
name = models.CharField(max_length=128)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Group(models.Model):
name = models.CharField(max_length=128)
members = models.ManyToManyField(Person, through='Membership')
custom_members = models.ManyToManyField(Person, through='CustomMembership', related_name="custom")
nodefaultsnonulls = models.ManyToManyField(Person, through='TestNoDefaultsOrNulls', related_name="testnodefaultsnonulls")
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
date_joined = models.DateTimeField(default=datetime.now)
invite_reason = models.CharField(max_length=64, null=True)
class Meta:
ordering = ('date_joined', 'invite_reason', 'group')
def __unicode__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
class CustomMembership(models.Model):
person = models.ForeignKey(Person, db_column="custom_person_column", related_name="custom_person_related_name")
group = models.ForeignKey(Group)
weird_fk = models.ForeignKey(Membership, null=True)
date_joined = models.DateTimeField(default=datetime.now)
def __unicode__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
class Meta:
db_table = "test_table"
class TestNoDefaultsOrNulls(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
nodefaultnonull = models.CharField(max_length=5)
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Friendship", symmetrical=False)
def __unicode__(self):
return self.name
class Friendship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_friended = models.DateTimeField()
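# Illustrative usage sketch (hypothetical, not part of the test fixtures).
# With an explicit ``through`` model, rows are created on the intermediate
# model instead of calling ``group.members.add()``:
#
#     jim = Person.objects.create(name='Jim')
#     rock = Group.objects.create(name='Rock')
#     Membership.objects.create(person=jim, group=rock,
#                               invite_reason='plays guitar')
#     assert jim in rock.members.all()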
|
sergei-maertens/django | refs/heads/master | tests/reverse_lookup/__init__.py | 12133432 | |
Aptitudetech/ERPNext | refs/heads/master | erpnext/accounts/report/asset_depreciation_ledger/__init__.py | 12133432 | |
siosio/intellij-community | refs/heads/master | python/testData/packaging/PyPackageUtil/CollectingPackageNamesIgnoresChildrenOfDirectoriesWithoutInitPy/project/pkg/__init__.py | 12133432 | |
sammyshj/gci | refs/heads/master | modules/ClimateDataPortal/DSL/Stringification.py | 53 |
from . import *
def Months__str__(month_filter):
return "Months(%s)" % (
", ".join(
Months.sequence[month_number + 1]
for month_number in month_filter.month_numbers
)
)
Months.__str__ = Months__str__
def From__str__(from_date):
original_args = [from_date.year]
if from_date.month is not None:
original_args.append(from_date.month)
if from_date.day is not None:
original_args.append(from_date.day)
return "From(%s)" % ", ".join(map(str,original_args))
From.__str__ = From__str__
def To__str__(to_date):
original_args = [to_date.year]
if to_date.month is not None:
original_args.append(to_date.month)
if to_date.day is not None:
original_args.append(to_date.day)
return "To(%s)" % ", ".join(map(str,original_args))
To.__str__ = To__str__
def Number__str__(number):
return "%s %s" % (number.value, number.units)
Number.__str__ = Number__str__
def AggregationNode__str__(aggregation):
return "".join((
type(aggregation).__name__, "(\"",
aggregation.dataset_name, "\", ",
", ".join(
map(str, aggregation.specification)
),
")"
))
AggregationNode.__str__ = AggregationNode__str__
def BinaryOperator__str__(binop):
return str(binop.left)+" "+binop.op+" "+str(binop.right)
BinaryOperator.__str__ = BinaryOperator__str__
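# Illustrative round-trips (hypothetical, assuming the DSL constructors
# imported above): the patches make str() reproduce constructor syntax, e.g.
#
#     str(From(2000, 1))  ->  "From(2000, 1)"
#     str(To(2010))       ->  "To(2010)"
#
# and a Number with value 5 and units "mm" renders as "5 mm".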
|
anton-golubkov/Garland | refs/heads/master | src/ipf/ipfblock/__init__.py | 1 | #-------------------------------------------------------------------------------
# Copyright (c) 2011 Anton Golubkov.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Lesser Public License v2.1
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
#
# Contributors:
# Anton Golubkov - initial API and implementation
#-------------------------------------------------------------------------------
""" Image processing flow block package
This package contains all processing blocks witch implements
OpenCV algorithms and other image processing operations.
"""
|
mumax/2 | refs/heads/master | tests/delta.py | 1 | # -*- coding: utf-8 -*-
from mumax2 import *
from math import *
import sys
eps = 1e-7
# number of cells
Nx = 32
Ny = 32
Nz = 32
setgridsize(Nx, Ny, Nz)
# physical size in meters
sizeX = 32e-9
sizeY = 32e-9
sizeZ = 32e-9
setcellsize(sizeX/Nx, sizeY/Ny, sizeZ/Nz)
load('micromagnetism')
m = [[[[1.0]]], [[[1.0]]], [[[1.0]]]]
setarray('m', m)
savestate('m_0', 'm')
adddeltaquant('m', 'm_0')
m = [[[[0.0]]], [[[0.0]]], [[[0.0]]]]
setarray('m', m)
m0 = getarray('Δm')
ok = True
valx = sqrt(1./3.)
valy = sqrt(1./3.)
valz = sqrt(1./3.)
for kk in range(Nz):
for jj in range(Ny):
for ii in range(Nx):
diff = abs((m0[0][ii][jj][kk] + valx) + (m0[1][ii][jj][kk] + valy) + (m0[2][ii][jj][kk] + valz))
if diff > eps:
print ii, jj, kk, diff
                ok = False
if ok:
print "\033[32m" + "✔ PASSED" + "\033[0m"
sys.exit()
else:
print "\033[31m" + "✘ FAILED" + "\033[0m"
sys.exit(1)
|
kinglyduck/hackerspace | refs/heads/master | src/badges/models.py | 1 | from datetime import time, date, datetime
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Max, Sum, Count
from django.templatetags.static import static
from django.utils import timezone
from prerequisites.models import Prereq
# Create your models here.
class BadgeType(models.Model):
name = models.CharField(max_length=50, unique=True)
sort_order = models.PositiveIntegerField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
repeatable = models.BooleanField(default = True)
# manual_only = models.BooleanField(default = False)
fa_icon = models.CharField(max_length=50, blank=True, null=True,
help_text="Name of a font-awesome icon, e.g.'fa-gift'")
def __str__(self):
return self.name
class Meta:
ordering = ['sort_order']
class BadgeSeries(models.Model):
name = models.CharField(max_length=50, unique=True)
def __str__(self):
return self.name
class Meta:
ordering = ["name"]
verbose_name_plural = "Badge Series'"
class BadgeQuerySet(models.query.QuerySet):
def get_type(self, badge_type):
return self.filter(badge_type = badge_type)
def get_active(self):
return self.filter(active = True)
class BadgeManager(models.Manager):
def get_queryset(self):
return BadgeQuerySet(self.model, using=self._db).order_by('sort_order')
#this should be generic and placed in the prerequisites app
# extend models.Model (e.g. PrereqModel) and prereq users should subclass it
def get_conditions_met(self, user):
pk_met_list = [
obj.pk for obj in self.get_queryset()
if Prereq.objects.all_conditions_met(obj, user, False)
# if not obj.badge_type.manual_only and Prereq.objects.all_conditions_met(obj, user)
]
return self.filter(pk__in = pk_met_list)
class Badge(models.Model):
name = models.CharField(max_length=50, unique=True)
xp = models.PositiveIntegerField(default = 0)
datetime_created = models.DateTimeField(auto_now_add=True, auto_now=False)
datetime_last_edit = models.DateTimeField(auto_now_add=False, auto_now=True)
short_description = models.TextField(blank=True, null=True)
series = models.ForeignKey(BadgeSeries, blank=True, null=True)
badge_type = models.ForeignKey(BadgeType)
icon = models.ImageField(upload_to='icons/badges/', blank=True, null=True) #needs Pillow for ImageField
sort_order = models.PositiveIntegerField(blank=True, null=True)
active = models.BooleanField(default = True)
# hours_between_repeats = models.PositiveIntegerField(default = 0)
# date_available = models.DateField(default=timezone.now)
# time_available = models.TimeField(default=time().min) # midnight
# date_expired = models.DateField(blank=True, null=True)
# time_expired = models.TimeField(blank=True, null=True, help_text= 'only used if date_expired is blank')
# minimum_XP = models.PositiveIntegerField(blank=True, null=True)
# maximum_XP = models.PositiveIntegerField(blank=True, null=True)
objects = BadgeManager()
class Meta:
#order_with_respect_to = 'badge_type'
ordering = ['sort_order', 'name']
def __str__(self):
return self.name
def prereqs(self):
return Prereq.objects.all_parent(self)
def get_absolute_url(self):
return reverse('badges:list')
def get_icon_url(self):
if self.icon and hasattr(self.icon, 'url'):
return self.icon.url
else:
return static('img/default_icon.png')
# to help with the prerequisite choices!
@staticmethod
def autocomplete_search_fields():
return ("name__icontains",)
# all models that want to act as a possible prerequisite need to have this method
# Create a default in the PrereqModel(models.Model) class that uses a default:
# prereq_met boolean field. Use that or override the method like this
def condition_met_as_prerequisite(self, user, num_required):
num_approved = BadgeAssertion.objects.all_for_user_badge(user, self).count()
# print("num_approved: " + str(num_approved) + "/" + str(num_required))
return num_approved >= num_required
class BadgeAssertionQuerySet(models.query.QuerySet):
def get_user(self, user):
return self.filter(user = user)
def get_badge(self, badge):
return self.filter(badge = badge)
def get_type(self, badge_type):
return self.filter(badge__badge_type = badge_type)
def no_game_lab(self):
return self.filter(game_lab_transfer = False)
class BadgeAssertionManager(models.Manager):
def get_queryset(self):
return BadgeAssertionQuerySet(self.model, using=self._db)
def all_for_user_badge(self, user, badge):
return self.get_queryset().get_user(user).get_badge(badge)
def all_for_user(self, user):
return self.get_queryset().get_user(user)
def all_for_user_distinct(self, user):
"""This only works in a postgresql database"""
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql_psycopg2':
return self.get_queryset().get_user(user).order_by('badge_id').distinct('badge')
return self.get_queryset().get_user(user)
def num_assertions(self, user, badge):
qs = self.all_for_user_badge(user, badge)
if qs.exists():
max_dict = qs.aggregate(Max('ordinal'))
return max_dict.get('ordinal__max')
else:
return 0
def get_assertion_ordinal(self, user, badge):
return self.num_assertions(user, badge) + 1
def create_assertion(self, user, badge, issued_by=None, transfer=False):
ordinal = self.get_assertion_ordinal(user, badge)
new_assertion = BadgeAssertion(
badge = badge,
user = user,
ordinal = ordinal,
issued_by = issued_by,
game_lab_transfer = transfer,
)
new_assertion.save()
return new_assertion
def check_for_new_assertions(self, user, transfer=False):
badges = Badge.objects.get_conditions_met(user)
for badge in badges:
#if the badge doesn't already exist
if not self.all_for_user_badge(user, badge):
self.create_assertion(user, badge, None, transfer)
def get_by_type_for_user(self, user):
self.check_for_new_assertions(user)
types = BadgeType.objects.all()
qs = self.get_queryset().get_user(user)
by_type = [
{
'badge_type': t,
'list': qs.get_type(t)
} for t in types
]
return by_type
def calculate_xp(self, user):
# self.check_for_new_assertions(user)
total_xp = self.get_queryset().no_game_lab().get_user(user).aggregate(Sum('badge__xp'))
xp = total_xp['badge__xp__sum']
if xp is None:
xp = 0
return xp
class BadgeAssertion(models.Model):
badge = models.ForeignKey(Badge)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
ordinal = models.PositiveIntegerField(default = 1, help_text = 'indicating the nth time user has received this badge')
# time_issued = models.DateTimeField(default = timezone.now)
timestamp = models.DateTimeField(auto_now=True, auto_now_add=False)
updated = models.DateTimeField(auto_now=False, auto_now_add=True)
issued_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, related_name='issued_by')
game_lab_transfer = models.BooleanField(default = False, help_text = 'XP not counted')
objects = BadgeAssertionManager()
def __str__(self):
# ordinal_str = ""
# if self.ordinal > 1:
# ordinal_str = " (" + str(self.ordinal) + ")"
return self.badge.name #+ ordinal_str
def get_absolute_url(self):
return reverse('badges:list')
def count(self):
"""Get the number of assertions with the same badge and user."""
return BadgeAssertion.objects.num_assertions(self.user, self.badge)
    def count_bootstrap_badge(self):
        """Get the number of assertions with the same badge and user, but if
        there are fewer than 2, return "" so that nothing is rendered when
        used in Bootstrap badges: http://getbootstrap.com/components/#badges
        """
count = self.count()
if count < 2:
return ""
return count
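# Illustrative manager usage (hypothetical object names, not part of the app):
#
#     user = User.objects.get(username='student')
#     badge = Badge.objects.get(name='First Quest')
#     BadgeAssertion.objects.create_assertion(user, badge)
#     xp = BadgeAssertion.objects.calculate_xp(user)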
from django.dispatch import receiver
from django.db.models.signals import post_save
from notifications.signals import notify
#only receive signals from BadgeAssertion model
@receiver(post_save, sender=BadgeAssertion)
def post_save_receiver(sender, **kwargs):
assertion = kwargs["instance"]
if kwargs["created"]:
        # need an issuing object; fix this better, should be something generic like "Hackerspace" or "Automatic".
        sender = assertion.issued_by
        if sender is None:
            sender = User.objects.filter(is_staff=True).first()
icon = "<i class='text-warning fa fa-lg fa-fw "
icon += assertion.badge.badge_type.fa_icon
icon +="'></i>"
notify.send(
sender,
# action= action,
target=assertion.badge,
recipient=assertion.user,
affected_users=[assertion.user,],
icon= icon,
verb="granted you a")
|
baroquebobcat/pants | refs/heads/master | src/python/pants/backend/jvm/subsystems/shader.py | 1 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
from collections import namedtuple
from contextlib import contextmanager
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.java.jar.jar_dependency import JarDependency
from pants.subsystem.subsystem import Subsystem, SubsystemError
from pants.util.contextutil import temporary_file
logger = logging.getLogger(__name__)
class UnaryRule(namedtuple('UnaryRule', ['name', 'pattern'])):
"""Base class for shading keep and zap rules specifiable in BUILD files."""
def render(self):
return '{name} {pattern}\n'.format(name=self.name, pattern=self.pattern)
class RelocateRule(namedtuple('Rule', ['from_pattern', 'to_pattern'])):
"""Base class for shading relocation rules specifiable in BUILD files."""
_wildcard_pattern = re.compile('[*]+')
_starts_with_number_pattern = re.compile('^[0-9]')
_illegal_package_char_pattern = re.compile('[^a-z0-9_]', re.I)
@classmethod
def _infer_shaded_pattern_iter(cls, from_pattern, prefix=None):
if prefix:
yield prefix
last = 0
for i, match in enumerate(cls._wildcard_pattern.finditer(from_pattern)):
yield from_pattern[last:match.start()]
yield '@{}'.format(i+1)
last = match.end()
yield from_pattern[last:]
@classmethod
def new(cls, from_pattern, shade_pattern=None, shade_prefix=None):
if not shade_pattern:
shade_pattern = ''.join(cls._infer_shaded_pattern_iter(from_pattern, shade_prefix))
return cls(from_pattern, shade_pattern)
def render(self):
return 'rule {0} {1}\n'.format(self.from_pattern, self.to_pattern)
class Shading(object):
"""Wrapper around relocate and exclude shading rules exposed in BUILD files."""
SHADE_PREFIX = '__shaded_by_pants__.'
"""The default shading package."""
@classmethod
def create_keep(cls, pattern):
"""Creates a rule which marks classes matching the given pattern as roots.
If any keep rules are set, all classes that are not reachable from roots are removed from the
jar.
Examples: ::
# Only include classes reachable from Main.
shading_keep('org.foobar.example.Main')
# Only keep classes reachable from the example package.
shading_keep('org.foobar.example.*')
:param string pattern: Any fully-qualified classname which matches this pattern will be kept as
a root. '*' is a wildcard that matches any individual package component, and '**' is a
wildcard that matches any trailing pattern (ie the rest of the string).
"""
return UnaryRule('keep', pattern)
@classmethod
def create_zap(cls, pattern):
"""Creates a rule which removes matching classes from the jar.
Examples: ::
# Remove the main class.
shading_zap('org.foobar.example.Main')
# Remove everything in the example package.
      shading_zap('org.foobar.example.*')
:param string pattern: Any fully-qualified classname which matches this pattern will removed
from the jar. '*' is a wildcard that matches any individual package component, and '**' is a
wildcard that matches any trailing pattern (ie the rest of the string).
"""
return UnaryRule('zap', pattern)
@classmethod
def create_relocate(cls, from_pattern, shade_pattern=None, shade_prefix=None):
"""Creates a rule which shades jar entries from one pattern to another.
Examples: ::
# Rename everything in the org.foobar.example package
# to __shaded_by_pants__.org.foobar.example.
shading_relocate('org.foobar.example.**')
# Rename org.foobar.example.Main to __shaded_by_pants__.org.foobar.example.Main
shading_relocate('org.foobar.example.Main')
# Rename org.foobar.example.Main to org.foobar.example.NotMain
shading_relocate('org.foobar.example.Main', 'org.foobar.example.NotMain')
# Rename all 'Main' classes under any direct subpackage of org.foobar.
shading_relocate('org.foobar.*.Main')
# Rename org.foobar package to com.barfoo package
shading_relocate('org.foobar.**', 'com.barfoo.@1')
# Rename everything in org.foobar.example package to __hello__.org.foobar.example
shading_relocate('org.foobar.example.**', shade_prefix='__hello__')
:param string from_pattern: Any fully-qualified classname which matches this pattern will be
shaded. '*' is a wildcard that matches any individual package component, and '**' is a
wildcard that matches any trailing pattern (ie the rest of the string).
:param string shade_pattern: The shaded pattern to use, where ``@1``, ``@2``, ``@3``, etc are
references to the groups matched by wildcards (groups are numbered from left to right). If
omitted, this pattern is inferred from the input pattern, prefixed by the ``shade_prefix``
(if provided). (Eg, a ``from_pattern`` of ``com.*.foo.bar.**`` implies a default
      ``shade_pattern`` of ``__shaded_by_pants__.com.@1.foo.bar.@2``)
:param string shade_prefix: Prefix to prepend when generating a ``shade_pattern`` (if a
``shade_pattern`` is not provided by the user). Defaults to '``__shaded_by_pants__.``'.
"""
    # NB(gmalmquist): We have to check "is None" rather than using an or statement, because the
# empty-string is a valid prefix which should not be replaced by the default prefix.
shade_prefix = Shading.SHADE_PREFIX if shade_prefix is None else shade_prefix
return RelocateRule.new(from_pattern, shade_pattern, shade_prefix)
@classmethod
def create_exclude(cls, pattern):
"""Creates a rule which excludes the given pattern from shading.
Examples: ::
# Don't shade the org.foobar.example.Main class
shading_exclude('org.foobar.example.Main')
# Don't shade anything under org.foobar.example
shading_exclude('org.foobar.example.**')
:param string pattern: Any fully-qualified classname which matches this pattern will NOT be
shaded. '*' is a wildcard that matches any individual package component, and '**' is a
wildcard that matches any trailing pattern (ie the rest of the string).
"""
return cls.create_relocate(pattern, shade_prefix='')
@classmethod
def create_keep_package(cls, package_name, recursive=True):
"""Convenience constructor for a package keep rule.
Essentially equivalent to just using ``shading_keep('package_name.**')``.
:param string package_name: Package name to keep (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to keep everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
"""
return cls.create_keep(cls._format_package_glob(package_name, recursive))
@classmethod
def create_zap_package(cls, package_name, recursive=True):
"""Convenience constructor for a package zap rule.
Essentially equivalent to just using ``shading_zap('package_name.**')``.
:param string package_name: Package name to remove (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to remove everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
"""
return cls.create_zap(cls._format_package_glob(package_name, recursive))
@classmethod
def create_relocate_package(cls, package_name, shade_prefix=None, recursive=True):
"""Convenience constructor for a package relocation rule.
Essentially equivalent to just using ``shading_relocate('package_name.**')``.
:param string package_name: Package name to shade (eg, ``org.pantsbuild.example``).
:param string shade_prefix: Optional prefix to apply to the package. Defaults to
``__shaded_by_pants__.``.
:param bool recursive: Whether to rename everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
"""
return cls.create_relocate(from_pattern=cls._format_package_glob(package_name, recursive),
shade_prefix=shade_prefix)
@classmethod
def create_exclude_package(cls, package_name, recursive=True):
"""Convenience constructor for a package exclusion rule.
Essentially equivalent to just using ``shading_exclude('package_name.**')``.
:param string package_name: Package name to exclude (eg, ``org.pantsbuild.example``).
:param bool recursive: Whether to exclude everything under any subpackage of ``package_name``,
or just direct children of the package. (Defaults to True).
"""
return cls.create_relocate(from_pattern=cls._format_package_glob(package_name, recursive),
shade_prefix='')
@classmethod
def _format_package_glob(cls, package_name, recursive=True):
return '{package}.{capture}'.format(package=package_name, capture='**' if recursive else '*')
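# Illustrative renderings of the rule constructors above (hypothetical
# package names); each rule renders to one jarjar rules-file line:
#
#     Shading.create_relocate_package('org.foobar').render()
#         -> 'rule org.foobar.** __shaded_by_pants__.org.foobar.@1\n'
#     Shading.create_exclude('org.foobar.Main').render()
#         -> 'rule org.foobar.Main org.foobar.Main\n'
#
#     _format_package_glob('org.foo')                 -> 'org.foo.*'
#     _format_package_glob('org.foo', recursive=True) -> 'org.foo.**'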
class Shader(object):
"""Creates shaded jars."""
class Error(Exception):
"""Indicates an error shading a jar."""
class Factory(JvmToolMixin, Subsystem):
options_scope = 'shader'
class Error(SubsystemError):
"""Error creating a Shader with the Shader.Factory subsystem."""
@classmethod
def subsystem_dependencies(cls):
return super(Shader.Factory, cls).subsystem_dependencies() + (DistributionLocator,)
@classmethod
def register_options(cls, register):
super(Shader.Factory, cls).register_options(register)
register('--binary-package-excludes', type=list, fingerprint=True,
default=['com.oracle', 'com.sun', 'java', 'javax', 'jdk', 'oracle', 'sun'],
help='Packages that the shader will exclude for binaries')
cls.register_jvm_tool(register,
'jarjar',
classpath=[
JarDependency(org='org.pantsbuild', name='jarjar', rev='1.6.5')
])
@classmethod
def create(cls, context, executor=None):
"""Creates and returns a new Shader.
:param Executor executor: Optional java executor to run jarjar with.
"""
if executor is None:
executor = SubprocessExecutor(DistributionLocator.cached())
classpath = cls.global_instance().tool_classpath_from_products(context.products, 'jarjar',
cls.options_scope)
return Shader(classpath, executor, cls.global_instance().get_options().binary_package_excludes)
@classmethod
def exclude_package(cls, package_name=None, recursive=False):
"""Excludes the given fully qualified package name from shading.
:param unicode package_name: A fully qualified package_name; eg: `org.pantsbuild`; `None` for
the java default (root) package.
:param bool recursive: `True` to exclude any package with `package_name` as a proper prefix;
`False` by default.
:returns: A `Shader.Rule` describing the shading exclusion.
"""
if not package_name:
return Shading.create_exclude('**' if recursive else '*')
return Shading.create_exclude_package(package_name, recursive=recursive)
@classmethod
def exclude_class(cls, class_name):
"""Excludes the given fully qualified class name from shading.
:param unicode class_name: A fully qualified classname, eg: `org.pantsbuild.tools.jar.Main`.
:returns: A `Shader.Rule` describing the shading exclusion.
"""
return Shading.create_exclude(class_name)
@classmethod
def shade_package(cls, package_name=None, recursive=False):
"""Includes the given fully qualified package name in shading.
:param unicode package_name: A fully qualified package_name; eg: `org.pantsbuild`; `None` for
the java default (root) package.
:param bool recursive: `True` to include any package with `package_name` as a proper prefix;
`False` by default.
:returns: A `Shader.Rule` describing the packages to be shaded.
"""
if not package_name:
return Shading.create_relocate('**' if recursive else '*')
return Shading.create_relocate_package(package_name, recursive=recursive)
@classmethod
def shade_class(cls, class_name):
"""Includes the given fully qualified class in shading.
:param unicode class_name: A fully qualified classname, eg: `org.pantsbuild.tools.jar.Main`.
:returns: A `Shader.Rule` describing the class shading.
"""
return Shading.create_relocate(class_name)
@staticmethod
def _iter_packages(paths):
for path in paths:
yield path.replace('/', '.')
@staticmethod
def _potential_package_path(path):
# TODO(John Sirois): Implement a full valid java package name check, `-` just happens to get
# the common non-package cases like META-INF/...
return (path.endswith('.class') or path.endswith('.java')) and '-' not in path
@classmethod
def _iter_jar_packages(cls, path):
paths = set()
for pathname in ClasspathUtil.classpath_entries_contents([path]):
if cls._potential_package_path(pathname):
package = os.path.dirname(pathname)
if package:
# This check avoids a false positive on things like module-info.class.
# We must never add an empty package, as this will cause every single string
# literal to be rewritten.
paths.add(package)
return cls._iter_packages(paths)
def __init__(self, jarjar_classpath, executor, binary_package_excludes):
"""Creates a `Shader` the will use the given `jarjar` jar to create shaded jars.
:param jarjar_classpath: The jarjar classpath.
:type jarjar_classpath: list of string.
:param executor: A java `Executor` to use to create shaded jar files.
"""
self._jarjar_classpath = jarjar_classpath
self._executor = executor
self._binary_package_excludes = binary_package_excludes
def assemble_binary_rules(self, main, jar, custom_rules=None):
"""Creates an ordered list of rules suitable for fully shading the given binary.
The default rules will ensure the `main` class name is un-changed along with a minimal set of
support classes but that everything else will be shaded.
Any `custom_rules` are given highest precedence and so they can interfere with this automatic
binary shading. In general it's safe to add exclusion rules to open up classes that need to be
shared between the binary and the code it runs over. An example would be excluding the
`org.junit.Test` annotation class from shading since a tool running junit needs to be able
to scan for this annotation inside the user code it tests.
:param unicode main: The main class to preserve as the entry point.
:param unicode jar: The path of the binary jar the `main` class lives in.
:param list custom_rules: An optional list of custom `Shader.Rule`s.
:returns: a precedence-ordered list of `Shader.Rule`s
"""
# If a class is matched by multiple rules, the 1st lexical match wins (see:
# https://code.google.com/p/jarjar/wiki/CommandLineDocs#Rules_file_format).
# As such we 1st ensure the `main` package and the jre packages have exclusion rules and
# then apply a final set of shading rules to everything else at lowest precedence.
# Custom rules take precedence.
rules = list(custom_rules or [])
    # Exclude the main entrypoint's package from shading. There may be
    # package-private classes that the main class accesses, so we must
    # preserve the whole package.
parts = main.rsplit('.', 1)
if len(parts) == 2:
main_package = parts[0]
else:
# There is no package component, so the main class is in the root (default) package.
main_package = None
rules.append(self.exclude_package(main_package))
rules.extend(self.exclude_package(system_pkg, recursive=True)
for system_pkg in self._binary_package_excludes)
# Shade everything else.
#
# NB: A simpler way to do this jumps out - just emit 1 wildcard rule:
#
# rule **.* _shaded_.@1.@2
#
# Unfortunately, as of jarjar 1.4 this wildcard catch-all technique improperly transforms
# resources in the `main_package`. The jarjar binary jar itself has its command line help text
# stored as a resource in its main's package and so using a catch-all like this causes
# recursively shading jarjar with itself using this class to fail!
#
# As a result we explicitly shade all the non `main_package` packages in the binary jar instead
# which does support recursively shading jarjar.
rules.extend(self.shade_package(pkg) for pkg in sorted(self._iter_jar_packages(jar))
if pkg != main_package)
return rules
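  # Illustrative output of assemble_binary_rules (hypothetical jar contents):
  # for main='org.foo.Main' over a jar that also contains org.bar, the
  # assembled rules render roughly as
  #
  #     rule org.foo.* org.foo.@1                        (main package kept)
  #     rule com.oracle.** com.oracle.@1                 (system excludes, etc.)
  #     rule org.bar.* __shaded_by_pants__.org.bar.@1    (everything else shaded)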
@contextmanager
def temporary_rules_file(self, rules):
with temporary_file() as fp:
for rule in rules:
fp.write(rule.render())
fp.close()
yield fp.name
@contextmanager
def binary_shader_for_rules(self, output_jar, jar, rules, jvm_options=None):
"""Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.
No default rules are applied; only the rules passed in as a parameter will be used.
:param unicode output_jar: The path to dump the shaded jar to; will be over-written if it
exists.
:param unicode jar: The path to the jar file to shade.
:param list rules: The rules to apply for shading.
:param list jvm_options: an optional sequence of options for the underlying jvm
:returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
:rtype: :class:`pants.java.executor.Executor.Runner`
"""
with self.temporary_rules_file(rules) as rules_file:
logger.debug('Running jarjar with rules:\n{}'.format(' '.join(rule.render() for rule in rules)))
yield self._executor.runner(classpath=self._jarjar_classpath,
main='org.pantsbuild.jarjar.Main',
jvm_options=jvm_options,
args=['process', rules_file, jar, output_jar])
def binary_shader(self, output_jar, main, jar, custom_rules=None, jvm_options=None):
"""Yields an `Executor.Runner` that will perform shading of the binary `jar` when `run()`.
The default rules will ensure the `main` class name is un-changed along with a minimal set of
support classes but that everything else will be shaded.
Any `custom_rules` are given highest precedence and so they can interfere with this automatic
    binary shading. In general it's safe to add exclusion rules to open up classes that need to be
    shared between the binary and the code it runs over. An example would be excluding the
    `org.junit.Test` annotation class from shading since a tool running junit needs to be able
    to scan for this annotation inside the user code it tests.
:param unicode output_jar: The path to dump the shaded jar to; will be over-written if it
exists.
:param unicode main: The main class in the `jar` to preserve as the entry point.
:param unicode jar: The path to the jar file to shade.
:param list custom_rules: An optional list of custom `Shader.Rule`s.
:param list jvm_options: an optional sequence of options for the underlying jvm
:returns: An `Executor.Runner` that can be `run()` to shade the given `jar`.
:rtype: :class:`pants.java.executor.Executor.Runner`
"""
all_rules = self.assemble_binary_rules(main, jar, custom_rules=custom_rules)
return self.binary_shader_for_rules(output_jar, jar, all_rules, jvm_options=jvm_options)
|
Gadal/sympy | refs/heads/master | bin/coverage_doctest.py | 83 | #!/usr/bin/env python
"""
Program to test that all methods/functions have at least one example
doctest. Also checks if docstrings are imported into Sphinx. For this to
work, the Sphinx docs need to be built first. Use "cd doc; make html" to
build the Sphinx docs.
Usage:
./bin/coverage_doctest.py sympy/core
or
./bin/coverage_doctest.py sympy/core/basic.py
If no arguments are given, all files in sympy/ are checked.
"""
from __future__ import print_function
import os
import sys
import inspect
from argparse import ArgumentParser, RawDescriptionHelpFormatter
try:
from HTMLParser import HTMLParser
except ImportError:
# It's html.parser in Python 3
from html.parser import HTMLParser
# Load color templates, used from sympy/utilities/runtests.py
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
def print_header(name, underline=None, color=None):
print()
if color:
print("%s%s%s" % (c_color % colors[color], name, c_normal))
else:
print(name)
if underline and not color:
print(underline*len(name))
def print_coverage(module_path, c, c_md, c_mdt, c_idt, c_sph, f, f_md, f_mdt,
f_idt, f_sph, score, total_doctests, total_members,
sphinx_score, total_sphinx, verbose=False, no_color=False,
sphinx=True):
""" Prints details (depending on verbose) of a module """
doctest_color = "Brown"
sphinx_color = "DarkGray"
less_100_color = "Red"
less_50_color = "LightRed"
equal_100_color = "Green"
big_header_color = "LightPurple"
small_header_color = "Purple"
if no_color:
score_string = "Doctests: %s%% (%s of %s)" % (score, total_doctests,
total_members)
elif score < 100:
if score < 50:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[less_50_color], score, total_doctests, total_members, c_normal)
else:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[less_100_color], score, total_doctests, total_members, c_normal)
else:
score_string = "%sDoctests:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[doctest_color], c_normal, c_color % colors[equal_100_color], score, total_doctests, total_members, c_normal)
if sphinx:
if no_color:
sphinx_score_string = "Sphinx: %s%% (%s of %s)" % (sphinx_score,
total_members - total_sphinx, total_members)
elif sphinx_score < 100:
if sphinx_score < 50:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[less_50_color], sphinx_score, total_members - total_sphinx,
total_members, c_normal)
else:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[less_100_color], sphinx_score, total_members -
total_sphinx, total_members, c_normal)
else:
sphinx_score_string = "%sSphinx:%s %s%s%% (%s of %s)%s" % \
(c_color % colors[sphinx_color], c_normal, c_color %
colors[equal_100_color], sphinx_score, total_members -
total_sphinx, total_members, c_normal)
if verbose:
print('\n' + '-'*70)
print(module_path)
print('-'*70)
else:
if sphinx:
print("%s: %s %s" % (module_path, score_string, sphinx_score_string))
else:
print("%s: %s" % (module_path, score_string))
if verbose:
print_header('CLASSES', '*', not no_color and big_header_color)
if not c:
print_header('No classes found!')
else:
if c_md:
print_header('Missing docstrings', '-', not no_color and small_header_color)
for md in c_md:
print(' * ' + md)
if c_mdt:
print_header('Missing doctests', '-', not no_color and small_header_color)
for md in c_mdt:
print(' * ' + md)
if c_idt:
# Use "# indirect doctest" in the docstring to
# supress this warning.
print_header('Indirect doctests', '-', not no_color and small_header_color)
for md in c_idt:
print(' * ' + md)
                print('\n    Use \"# indirect doctest\" in the docstring to suppress this warning')
if c_sph:
print_header('Not imported into Sphinx', '-', not no_color and small_header_color)
for md in c_sph:
print(' * ' + md)
print_header('FUNCTIONS', '*', not no_color and big_header_color)
if not f:
print_header('No functions found!')
else:
if f_md:
print_header('Missing docstrings', '-', not no_color and small_header_color)
for md in f_md:
print(' * ' + md)
if f_mdt:
print_header('Missing doctests', '-', not no_color and small_header_color)
for md in f_mdt:
print(' * ' + md)
if f_idt:
print_header('Indirect doctests', '-', not no_color and small_header_color)
for md in f_idt:
print(' * ' + md)
                print('\n    Use \"# indirect doctest\" in the docstring to suppress this warning')
if f_sph:
print_header('Not imported into Sphinx', '-', not no_color and small_header_color)
for md in f_sph:
print(' * ' + md)
if verbose:
print('\n' + '-'*70)
print(score_string)
if sphinx:
print(sphinx_score_string)
print('-'*70)
def _is_indirect(member, doc):
""" Given string repr of doc and member checks if the member
contains indirect documentation """
d = member in doc
e = 'indirect doctest' in doc
    return not (d or e)
def _get_arg_list(name, fobj):
""" Given a function object, constructs a list of arguments
and their defaults. Takes care of varargs and kwargs """
trunc = 20 # Sometimes argument length can be huge
argspec = inspect.getargspec(fobj)
arg_list = []
if argspec.args:
for arg in argspec.args:
arg_list.append(str(arg))
arg_list.reverse()
    # Now add the defaults; arg_list is currently reversed, so its first
    # entries pair with the last defaults
    if argspec.defaults:
        for i in range(len(argspec.defaults)):
            arg_list[i] = str(arg_list[i]) + '=' + str(argspec.defaults[-1 - i])
# Get the list in right order
arg_list.reverse()
# Add var args
if argspec.varargs:
arg_list.append(argspec.varargs)
if argspec.keywords:
arg_list.append(argspec.keywords)
# Truncate long arguments
arg_list = [x[:trunc] for x in arg_list]
# Construct the parameter string (enclosed in brackets)
str_param = "%s(%s)" % (name, ', '.join(arg_list))
return str_param
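# Illustrative result (hypothetical function): with the reversed-defaults
# pairing above, ``def f(a, b, c=1, d=2)`` produces 'f(a, b, c=1, d=2)'.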
def get_mod_name(path, base):
""" Gets a module name, given the path of file/dir and base
dir of sympy """
rel_path = os.path.relpath(path, base)
# Remove the file extension
rel_path, ign = os.path.splitext(rel_path)
# Replace separators by . for module path
file_module = ""
h, t = os.path.split(rel_path)
while h or t:
if t:
file_module = t + '.' + file_module
h, t = os.path.split(h)
return file_module[:-1]
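# Illustrative mapping (hypothetical paths):
#     get_mod_name('/repo/sympy/core/basic.py', '/repo') -> 'sympy.core.basic'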
class FindInSphinx(HTMLParser):
is_imported = []
def handle_starttag(self, tag, attr):
a = dict(attr)
if tag == "div" and a.get('class', None) == "viewcode-block":
self.is_imported.append(a['id'])
def find_sphinx(name, mod_path, found={}):
if mod_path in found: # Cache results
return name in found[mod_path]
doc_path = mod_path.split('.')
doc_path[-1] += '.html'
sphinx_path = os.path.join(sympy_top, 'doc', '_build', 'html', '_modules', *doc_path)
if not os.path.exists(sphinx_path):
return False
with open(sphinx_path) as f:
html_txt = f.read()
p = FindInSphinx()
p.feed(html_txt)
found[mod_path] = p.is_imported
return name in p.is_imported
def process_function(name, c_name, b_obj, mod_path, f_sk, f_md, f_mdt, f_idt,
f_has_doctest, sk_list, sph, sphinx=True):
"""
Processes a function to get information regarding documentation.
    It is assumed that the function calling this subroutine has already
verified that it is a valid module function.
"""
if name in sk_list:
return False, False
# We add in the end, as inspect.getsourcelines is slow
add_md = False
add_mdt = False
add_idt = False
in_sphinx = True
f_doctest = False
function = False
if inspect.isclass(b_obj):
obj = getattr(b_obj, name)
obj_name = c_name + '.' + name
else:
obj = b_obj
obj_name = name
full_name = _get_arg_list(name, obj)
if name.startswith('_'):
f_sk.append(full_name)
else:
if not obj.__doc__:
add_md = True
elif not '>>>' in obj.__doc__:
add_mdt = True
elif _is_indirect(name, obj.__doc__):
add_idt = True
else:
f_doctest = True
function = True
if sphinx:
in_sphinx = find_sphinx(obj_name, mod_path)
if add_md or add_mdt or add_idt or not in_sphinx:
try:
line_no = inspect.getsourcelines(obj)[1]
except IOError:
# Raised when source does not exist
# which means the function is not there.
return False, False
full_name = "LINE %d: %s" % (line_no, full_name)
if add_md:
f_md.append(full_name)
elif add_mdt:
f_mdt.append(full_name)
elif add_idt:
f_idt.append(full_name)
if not in_sphinx:
sph.append(full_name)
return f_doctest, function
def process_class(c_name, obj, c_sk, c_md, c_mdt, c_idt, c_has_doctest,
mod_path, sph, sphinx=True):
"""
Extracts information about the class regarding documentation.
It is assumed that the function calling this subroutine has already
checked that the class is valid.
"""
# Skip class case
if c_name.startswith('_'):
c_sk.append(c_name)
return False, False, None
c = False
c_dt = False
# Get the line number of class
try:
source, line_no = inspect.getsourcelines(obj)
except IOError:
# Raised when source does not exist
# which means the class is not there.
return False, False, None
c = True
full_name = "LINE %d: %s" % (line_no, c_name)
if not obj.__doc__:
c_md.append(full_name)
elif not '>>>' in obj.__doc__:
c_mdt.append(full_name)
elif _is_indirect(c_name, obj.__doc__):
c_idt.append(full_name)
else:
c_dt = True
c_has_doctest.append(full_name)
in_sphinx = False
if sphinx:
in_sphinx = find_sphinx(c_name, mod_path)
if not in_sphinx:
sph.append(full_name)
return c_dt, c, source
def coverage(module_path, verbose=False, no_color=False, sphinx=True):
""" Given a module path, builds an index of all classes and functions
contained. It then goes through each of the classes/functions to get
the docstring and doctest coverage of the module. """
# Import the package and find members
m = None
try:
__import__(module_path)
m = sys.modules[module_path]
except Exception as a:
# Most likely cause, absence of __init__
print("%s could not be loaded due to %s." % (module_path, repr(a)))
return 0, 0, 0
c_skipped = []
c_md = []
c_mdt = []
c_has_doctest = []
c_idt = []
classes = 0
c_doctests = 0
c_sph = []
f_skipped = []
f_md = []
f_mdt = []
f_has_doctest = []
f_idt = []
functions = 0
f_doctests = 0
f_sph = []
skip_members = ['__abstractmethods__']
# Get the list of members
m_members = dir(m)
for member in m_members:
# Check for skipped functions first, they throw nasty errors
# when combined with getattr
if member in skip_members:
continue
# Identify if the member (class/def) a part of this module
obj = getattr(m, member)
obj_mod = inspect.getmodule(obj)
# Function not a part of this module
if not obj_mod or not obj_mod.__name__ == module_path:
continue
# If it's a function
if inspect.isfunction(obj) or inspect.ismethod(obj):
f_dt, f = process_function(member, '', obj, module_path,
f_skipped, f_md, f_mdt, f_idt, f_has_doctest, skip_members,
f_sph, sphinx=sphinx)
if f:
functions += 1
if f_dt:
f_doctests += 1
# If it's a class, look at it's methods too
elif inspect.isclass(obj):
# Process the class first
c_dt, c, source = process_class(member, obj, c_skipped, c_md,
c_mdt, c_idt, c_has_doctest, module_path, c_sph, sphinx=sphinx)
if not c:
continue
else:
classes += 1
if c_dt:
c_doctests += 1
# Iterate through it's members
for f_name in obj.__dict__:
if f_name in skip_members or f_name.startswith('_'):
continue
# Check if def funcname appears in source
if not ("def " + f_name) in ' '.join(source):
continue
# Identify the module of the current class member
f_obj = getattr(obj, f_name)
obj_mod = inspect.getmodule(f_obj)
# Function not a part of this module
if not obj_mod or not obj_mod.__name__ == module_path:
continue
# If it's a function
if inspect.isfunction(f_obj) or inspect.ismethod(f_obj):
f_dt, f = process_function(f_name, member, obj,
module_path, f_skipped, f_md, f_mdt, f_idt, f_has_doctest,
skip_members, f_sph, sphinx=sphinx)
if f:
functions += 1
if f_dt:
f_doctests += 1
# Evaluate the percent coverage
total_doctests = c_doctests + f_doctests
total_members = classes + functions
if total_members:
score = 100 * float(total_doctests) / (total_members)
else:
score = 100
score = int(score)
if sphinx:
total_sphinx = len(c_sph) + len(f_sph)
if total_members:
sphinx_score = 100 - 100 * float(total_sphinx) / total_members
else:
sphinx_score = 100
sphinx_score = int(sphinx_score)
else:
total_sphinx = 0
sphinx_score = 0
# Sort functions/classes by line number
c_md = sorted(c_md, key=lambda x: int(x.split()[1][:-1]))
c_mdt = sorted(c_mdt, key=lambda x: int(x.split()[1][:-1]))
c_idt = sorted(c_idt, key=lambda x: int(x.split()[1][:-1]))
f_md = sorted(f_md, key=lambda x: int(x.split()[1][:-1]))
f_mdt = sorted(f_mdt, key=lambda x: int(x.split()[1][:-1]))
f_idt = sorted(f_idt, key=lambda x: int(x.split()[1][:-1]))
print_coverage(module_path, classes, c_md, c_mdt, c_idt, c_sph, functions, f_md,
f_mdt, f_idt, f_sph, score, total_doctests, total_members,
sphinx_score, total_sphinx, verbose=verbose,
no_color=no_color, sphinx=sphinx)
return total_doctests, total_sphinx, total_members
def go(sympy_top, file, verbose=False, no_color=False, exact=True, sphinx=True):
if os.path.isdir(file):
doctests, total_sphinx, num_functions = 0, 0, 0
for F in os.listdir(file):
_doctests, _total_sphinx, _num_functions = go(sympy_top, '%s/%s' % (file, F),
verbose=verbose, no_color=no_color, exact=False, sphinx=sphinx)
doctests += _doctests
total_sphinx += _total_sphinx
num_functions += _num_functions
return doctests, total_sphinx, num_functions
if (not (file.endswith('.py') or file.endswith('.pyx')) or
file.endswith('__init__.py') or
not exact and ('test_' in file or 'bench_' in file or
any(name in file for name in skip_paths))):
return 0, 0, 0
if not os.path.exists(file):
print("File(%s does not exist." % file)
sys.exit(1)
# Relpath for constructing the module name
return coverage(get_mod_name(file, sympy_top), verbose=verbose,
no_color=no_color, sphinx=sphinx)
if __name__ == "__main__":
bintest_dir = os.path.abspath(os.path.dirname(__file__)) # bin/cover...
sympy_top = os.path.split(bintest_dir)[0] # ../
sympy_dir = os.path.join(sympy_top, 'sympy') # ../sympy/
if os.path.isdir(sympy_dir):
sys.path.insert(0, sympy_top)
usage = "usage: ./bin/doctest_coverage.py PATHS"
parser = ArgumentParser(
description=__doc__,
usage=usage,
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument("path", nargs='*', default=[os.path.join(sympy_top, 'sympy')])
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
default=False)
parser.add_argument("--no-colors", action="store_true", dest="no_color",
help="use no colors", default=False)
parser.add_argument("--no-sphinx", action="store_false", dest="sphinx",
help="don't report Sphinx coverage", default=True)
args = parser.parse_args()
if args.sphinx and not os.path.exists(os.path.join(sympy_top, 'doc', '_build', 'html')):
print("""
Cannot check Sphinx coverage without a documentation build. To build the
docs, run "cd doc; make html". To skip checking Sphinx coverage, pass --no-sphinx.
""")
sys.exit(1)
full_coverage = True
for file in args.path:
file = os.path.normpath(file)
print('DOCTEST COVERAGE for %s' % (file))
print('='*70)
print()
doctests, total_sphinx, num_functions = go(sympy_top, file, verbose=args.verbose,
no_color=args.no_color, sphinx=args.sphinx)
if num_functions == 0:
score = 100
sphinx_score = 100
else:
score = 100 * float(doctests) / num_functions
score = int(score)
if doctests < num_functions:
full_coverage = False
if args.sphinx:
sphinx_score = 100 - 100 * float(total_sphinx) / num_functions
sphinx_score = int(sphinx_score)
if total_sphinx > 0:
full_coverage = False
print()
print('='*70)
if args.no_color:
print("TOTAL DOCTEST SCORE for %s: %s%% (%s of %s)" % \
(get_mod_name(file, sympy_top), score, doctests, num_functions))
elif score < 100:
print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Red"]),
score, doctests, num_functions, c_normal))
else:
print("TOTAL DOCTEST SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Green"]),
score, doctests, num_functions, c_normal))
if args.sphinx:
if args.no_color:
print("TOTAL SPHINX SCORE for %s: %s%% (%s of %s)" % \
(get_mod_name(file, sympy_top), sphinx_score,
num_functions - total_sphinx, num_functions))
elif sphinx_score < 100:
print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Red"]),
sphinx_score, num_functions - total_sphinx, num_functions, c_normal))
else:
print("TOTAL SPHINX SCORE for %s: %s%s%% (%s of %s)%s" % \
(get_mod_name(file, sympy_top), c_color % (colors["Green"]),
sphinx_score, num_functions - total_sphinx, num_functions, c_normal))
print()
sys.exit(not full_coverage)
|
atuljain/odoo | refs/heads/master | addons/base_gengo/__openerp__.py | 68 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Automated Translations through Gengo API',
'version': '0.1',
'category': 'Tools',
'description': """
Automated Translations through Gengo API
========================================
This module will install a passive scheduler job for automated translations
using the Gengo API. To activate it, you must:
1) Configure your Gengo authentication parameters under `Settings > Companies > Gengo Parameters`
2) Launch the wizard under `Settings > Application Terms > Gengo: Manual Request of Translation` and follow the wizard.
This wizard will activate the cron job and the scheduler, and will start the automatic translation via Gengo services for all the terms for which you requested it.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'depends': ['base'],
'data': [
'gengo_sync_schedular_data.xml',
'ir_translation.xml',
'res_company_view.xml',
'wizard/base_gengo_translations_view.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
voidabhi/cricinfo | refs/heads/master | cricinfo/my_bot.py | 1 | #!/usr/bin/python
import requests
from bs4 import BeautifulSoup
import xmltodict
import click
from ConfigParser import SafeConfigParser
def get_config(key):
    """ Fetch a config value from the config file; keys are expected
    in the [DEFAULT] section """
    parser = SafeConfigParser()
    parser.read('../.config')
    return parser.get('DEFAULT', key)
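# Illustrative '../.config' layout assumed by get_config (the URL shown is
# a placeholder, not necessarily the real feed address):
#
#     [DEFAULT]
#     url = http://static.cricinfo.com/rss/livescores.xml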
class Match(object):
""" Represents a cricinfo match """
def __init__(self, title, link, description, guid):
self.title = title
self.link = link
self.description = description
self.guid = guid
@classmethod
    def from_xml(cls, xml):
        """ create an object from serialized xml """
        item = xmltodict.parse(xml)['item']
        return cls(item['title'], item['link'], item['description'], item['guid'])
def __repr__(self):
return '<Match=%s>'%self.title
def get_matches():
"""Fetches matches from the cricinfo url"""
    r = requests.get(get_config('url'))
    soup = BeautifulSoup(r.text, 'html.parser')
for match in soup.find_all('item'):
yield Match.from_xml(str(match))
def print_matches(matches):
"""Prints all matches to the console."""
click.echo()
for match in matches:
click.secho('%s\t' % match.title, bold=True, fg="red", nl=False)
click.echo()
@click.command()
def main():
"""A cli to Cricinfo to see live scores"""
# fetch matches
matches = get_matches()
# print matches
print_matches(matches)
if __name__ == '__main__':
main()
|
JDShu/SCOPE | refs/heads/master | askbot/forms.py | 1 | """Forms, custom form fields and related utility functions
used in AskBot"""
import re
from django import forms
from askbot import const
from askbot.const import message_keys
from django.forms.util import ErrorList
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy, string_concat
from django.utils.text import get_text_list
from django.contrib.auth.models import User
from django_countries import countries
from askbot.utils.forms import NextUrlField, UserNameField
from askbot.mail import extract_first_email_address
from askbot.models.tag import get_groups
from recaptcha_works.fields import RecaptchaField
from askbot.conf import settings as askbot_settings
from askbot.conf import get_tag_display_filter_strategy_choices
from tinymce.widgets import TinyMCE
import logging
def cleanup_dict(dictionary, key, empty_value):
"""deletes key from dictionary if it exists
and the corresponding value equals the empty_value
"""
if key in dictionary and dictionary[key] == empty_value:
del dictionary[key]
def format_form_errors(form):
"""Formats form errors in HTML
if there is only one error - returns a plain string
if more than one, returns an unordered list of errors
in HTML format.
If there are no errors, returns empty string
"""
if form.errors:
errors = form.errors.values()
if len(errors) == 1:
return errors[0]
else:
result = '<ul>'
for error in errors:
result += '<li>%s</li>' % error
result += '</ul>'
return result
else:
return ''
def clean_marked_tagnames(tagnames):
"""return two strings - one containing tagnames
that are straight names of tags, and the second one
containing names of wildcard tags,
wildcard tags are those that have an asterisk at the end
the function does not verify that the tag names are valid
"""
if askbot_settings.USE_WILDCARD_TAGS is False:
return tagnames, list()
pure_tags = list()
wildcards = list()
for tagname in tagnames:
if tagname == '':
continue
if tagname.endswith('*'):
if tagname.count('*') > 1:
continue
else:
wildcards.append(tagname)
else:
pure_tags.append(tagname)
return pure_tags, wildcards
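# Illustrative split (hypothetical tags, assuming USE_WILDCARD_TAGS is on):
#     clean_marked_tagnames(['django', 'py*', 'bad**'])
#         -> (['django'], ['py*'])    # 'bad**' is dropped: multiple '*'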
def filter_choices(remove_choices=None, from_choices=None):
"""a utility function that will remove choice tuples
    usable with forms.ChoiceField from
    ``from_choices``; the removed ones will be those given
    by the ``remove_choices`` list
    there is no error checking, ``from_choices`` tuple must be as expected
    to work with forms.ChoiceField
"""
if not isinstance(remove_choices, list):
raise TypeError('remove_choices must be a list')
filtered_choices = tuple()
for choice_to_test in from_choices:
remove = False
for choice in remove_choices:
if choice == choice_to_test[0]:
remove = True
break
if remove is False:
filtered_choices += (choice_to_test, )
return filtered_choices
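# Illustrative call (hypothetical choices):
#     filter_choices(remove_choices=['b'],
#                    from_choices=(('a', 'A'), ('b', 'B'), ('c', 'C')))
#         -> (('a', 'A'), ('c', 'C'))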
def need_mandatory_tags():
"""true, if list of mandatory tags is not empty"""
from askbot import models
return (
askbot_settings.TAGS_ARE_REQUIRED
and len(models.tag.get_mandatory_tags()) > 0
)
def mandatory_tag_missing_in_list(tag_strings):
"""true, if mandatory tag is not present in the list
of ``tag_strings``"""
from askbot import models
mandatory_tags = models.tag.get_mandatory_tags()
for mandatory_tag in mandatory_tags:
for tag_string in tag_strings:
if tag_strings_match(tag_string, mandatory_tag):
return False
return True
def tag_strings_match(tag_string, mandatory_tag):
"""true if tag string matches the mandatory tag,
the comparison is not symmetric if tag_string ends with a
wildcard (asterisk)
"""
if mandatory_tag.endswith('*'):
return tag_string.startswith(mandatory_tag[:-1])
else:
return tag_string == mandatory_tag
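# Illustrative matches (hypothetical tags):
#     tag_strings_match('python-3', 'python*')  -> True   (wildcard prefix)
#     tag_strings_match('python', 'python')     -> True   (exact)
#     tag_strings_match('python*', 'python')    -> False  (not symmetric)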
COUNTRY_CHOICES = (('unknown', _('select country')),) + countries.COUNTRIES
class CountryField(forms.ChoiceField):
"""this is better placed into the django_coutries app"""
def __init__(self, *args, **kwargs):
"""sets label and the country choices
"""
kwargs['choices'] = kwargs.pop('choices', COUNTRY_CHOICES)
kwargs['label'] = kwargs.pop('label', _('Country'))
super(CountryField, self).__init__(*args, **kwargs)
def clean(self, value):
"""Handles case of 'unknown' country selection
"""
if self.required:
if value == 'unknown':
raise forms.ValidationError(_('Country field is required'))
if value == 'unknown':
return None
return value
class CountedWordsField(forms.CharField):
"""a field where a number of words is expected
to be in a certain range"""
def __init__(
self, min_words=0, max_words=9999, field_name=None,
*args, **kwargs
):
self.min_words = min_words
self.max_words = max_words
self.field_name = field_name
super(CountedWordsField, self).__init__(*args, **kwargs)
def clean(self, value):
#todo: this field must be adapted to work with Chinese, etc.
#for that we'll have to count characters instead of words
if value is None:
value = ''
value = value.strip()
word_count = len(value.split())
if word_count < self.min_words:
msg = ungettext_lazy(
'must be > %d word',
'must be > %d words',
self.min_words - 1
) % (self.min_words - 1)
#todo - space is not used in Chinese
raise forms.ValidationError(
string_concat(self.field_name, ' ', msg)
)
if word_count > self.max_words:
msg = ungettext_lazy(
'must be < %d word',
'must be < %d words',
self.max_words + 1
) % (self.max_words + 1)
raise forms.ValidationError(
string_concat(self.field_name, ' ', msg)
)
return value
class DomainNameField(forms.CharField):
"""Field for Internet Domain Names
todo: maybe there is a standard field for this?
"""
def clean(self, value):
#find a better regex, taking into account tlds
domain_re = re.compile(r'[a-zA-Z\d]+(\.[a-zA-Z\d]+)+')
if domain_re.match(value):
return value
else:
raise forms.ValidationError(
'%s is not a valid domain name' % value
)
class TitleField(forms.CharField):
"""Fild receiving exercise title"""
def __init__(self, *args, **kwargs):
super(TitleField, self).__init__(*args, **kwargs)
self.required = kwargs.get('required', True)
self.widget = forms.TextInput(
attrs={'size': 70, 'autocomplete': 'off'}
)
self.max_length = 255
self.label = _('title')
self.help_text = _(
'please enter a descriptive title for your exercise'
)
self.initial = ''
def clean(self, value):
"""cleans the field for minimum and maximum length
also is supposed to work for unicode non-ascii characters"""
if value is None:
value = ''
if len(value) < askbot_settings.MIN_TITLE_LENGTH:
msg = ungettext_lazy(
'title must be > %d character',
'title must be > %d characters',
askbot_settings.MIN_TITLE_LENGTH
) % askbot_settings.MIN_TITLE_LENGTH
raise forms.ValidationError(msg)
encoded_value = value.encode('utf-8')
if len(value) == len(encoded_value):
if len(value) > self.max_length:
raise forms.ValidationError(
_(
'The title is too long, maximum allowed size is '
'%d characters'
) % self.max_length
)
elif len(encoded_value) > self.max_length:
raise forms.ValidationError(
_(
'The title is too long, maximum allowed size is '
'%d bytes'
) % self.max_length
)
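        #illustrative note (hypothetical value): 200 copies of u'\u00e9'
        #give len() == 200 but a utf-8 encoded length of 400 bytes,
        #which is why the byte length is validated separately above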
return value.strip() # TODO: test me
class EditorField(forms.CharField):
"""EditorField is subclassed by the
:class:`ExerciseEditorField` and :class:`ProblemEditorField`
"""
    #note: no trailing commas here - these must be plain strings,
    #not 1-tuples, otherwise ungettext_lazy() in clean() would break
    length_error_template_singular = 'post content must be > %d character'
    length_error_template_plural = 'post content must be > %d characters'
min_length = 10 # sentinel default value
def __init__(self, *args, **kwargs):
editor_attrs = kwargs.pop('editor_attrs', {})
super(EditorField, self).__init__(*args, **kwargs)
self.required = True
widget_attrs = {'id': 'editor'}
if askbot_settings.EDITOR_TYPE == 'markdown':
self.widget = forms.Textarea(attrs=widget_attrs)
elif askbot_settings.EDITOR_TYPE == 'tinymce':
self.widget = TinyMCE(attrs=widget_attrs, mce_attrs=editor_attrs)
self.label = _('content')
self.help_text = u''
self.initial = ''
def clean(self, value):
if value is None:
value = ''
if len(value) < self.min_length:
msg = ungettext_lazy(
self.length_error_template_singular,
self.length_error_template_plural,
self.min_length
) % self.min_length
raise forms.ValidationError(msg)
return value
class ExerciseEditorField(EditorField):
"""Editor field for the exercises"""
def __init__(self, *args, **kwargs):
super(ExerciseEditorField, self).__init__(*args, **kwargs)
self.length_error_template_singular = \
'exercise body must be > %d character'
self.length_error_template_plural = \
'exercise body must be > %d characters'
self.min_length = askbot_settings.MIN_EXERCISE_BODY_LENGTH
class ProblemEditorField(EditorField):
"""Editor field for problems"""
def __init__(self, *args, **kwargs):
super(ProblemEditorField, self).__init__(*args, **kwargs)
self.length_error_template_singular = \
'problem must be > %d character'
self.length_error_template_plural = \
'problem must be > %d characters'
self.min_length = askbot_settings.MIN_PROBLEM_BODY_LENGTH
class SolutionEditorField(EditorField):
"""Editor field for problems"""
def __init__(self, *args, **kwargs):
super(SolutionEditorField, self).__init__(*args, **kwargs)
self.length_error_template_singular = 'solution must be > %d character'
self.length_error_template_plural = 'solution must be > %d characters'
self.min_length = askbot_settings.MIN_PROBLEM_BODY_LENGTH
def clean_tag(tag_name):
"""a function that cleans a single tag name"""
tag_length = len(tag_name)
if tag_length > askbot_settings.MAX_TAG_LENGTH:
#singular form is odd in english, but required for pluralization
#in other languages
        msg = ungettext_lazy(
            #odd but added for completeness
            'each tag must be shorter than %(max_chars)d character',
            'each tag must be shorter than %(max_chars)d characters',
            askbot_settings.MAX_TAG_LENGTH
        ) % {'max_chars': askbot_settings.MAX_TAG_LENGTH}
raise forms.ValidationError(msg)
#todo - this needs to come from settings
tagname_re = re.compile(const.TAG_REGEX, re.UNICODE)
if not tagname_re.search(tag_name):
raise forms.ValidationError(
_(message_keys.TAG_WRONG_CHARS_MESSAGE)
)
if askbot_settings.FORCE_LOWERCASE_TAGS:
        #a simpler way to handle tags - just lowercase them all
return tag_name.lower()
else:
try:
from askbot import models
stored_tag = models.Tag.objects.get(name__iexact=tag_name)
return stored_tag.name
except models.Tag.DoesNotExist:
return tag_name
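#illustrative examples (hypothetical settings and tag names): with
#FORCE_LOWERCASE_TAGS on, clean_tag(u'Django') returns u'django'; with it
#off and an existing tag stored as u'django', clean_tag(u'DJANGO') returns
#the stored spelling u'django'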
class TagNamesField(forms.CharField):
"""field that receives AskBot tag names"""
def __init__(self, *args, **kwargs):
super(TagNamesField, self).__init__(*args, **kwargs)
self.required = kwargs.get('required',
askbot_settings.TAGS_ARE_REQUIRED)
self.widget = forms.TextInput(
attrs={'size': 50, 'autocomplete': 'off'}
)
self.max_length = 255
self.error_messages['max_length'] = _(
'We ran out of space for recording the tags. '
'Please shorten or delete some of them.'
)
self.label = _('tags')
self.help_text = ungettext_lazy(
'Tags are short keywords, with no spaces within. '
'Up to %(max_tags)d tag can be used.',
'Tags are short keywords, with no spaces within. '
'Up to %(max_tags)d tags can be used.',
askbot_settings.MAX_TAGS_PER_POST
) % {'max_tags': askbot_settings.MAX_TAGS_PER_POST}
self.initial = ''
def clean(self, value):
from askbot import models
value = super(TagNamesField, self).clean(value)
data = value.strip()
if len(data) < 1:
if askbot_settings.TAGS_ARE_REQUIRED:
raise forms.ValidationError(
_(message_keys.TAGS_ARE_REQUIRED_MESSAGE)
)
else:
#don't test for required characters when tags is ''
return ''
split_re = re.compile(const.TAG_SPLIT_REGEX)
tag_strings = split_re.split(data)
entered_tags = []
tag_count = len(tag_strings)
if tag_count > askbot_settings.MAX_TAGS_PER_POST:
max_tags = askbot_settings.MAX_TAGS_PER_POST
            msg = ungettext_lazy(
                'please use %(tag_count)d tag or less',
                'please use %(tag_count)d tags or less',
                max_tags) % {'tag_count': max_tags}
raise forms.ValidationError(msg)
if need_mandatory_tags():
if mandatory_tag_missing_in_list(tag_strings):
msg = _(
                    'At least one of the following tags is required: %(tags)s'
) % {'tags': get_text_list(models.tag.get_mandatory_tags())}
raise forms.ValidationError(msg)
cleaned_entered_tags = list()
for tag in tag_strings:
            cleaned_tag = clean_tag(tag)
            if cleaned_tag not in cleaned_entered_tags:
                cleaned_entered_tags.append(cleaned_tag)
        result = u' '.join(cleaned_entered_tags)
        if len(result) > 125:  #magic number - matches max_length in the db
            raise forms.ValidationError(self.error_messages['max_length'])
        return result
class WikiField(forms.BooleanField):
"""Rendered as checkbox turning post into
"community wiki"
"""
def __init__(self, *args, **kwargs):
super(WikiField, self).__init__(*args, **kwargs)
self.required = False
self.initial = False
self.label = _(
'community wiki (karma is not awarded & '
'many others can edit wiki post)'
)
self.help_text = _(
'if you choose community wiki option, the exercise '
'and problem do not generate points and name of '
'author will not be shown'
)
def clean(self, value):
return value and askbot_settings.WIKI_ON
class EmailNotifyField(forms.BooleanField):
"""Rendered as checkbox which turns on
email notifications on the post"""
def __init__(self, *args, **kwargs):
super(EmailNotifyField, self).__init__(*args, **kwargs)
self.required = False
self.widget.attrs['class'] = 'nomargin'
class SummaryField(forms.CharField):
def __init__(self, *args, **kwargs):
super(SummaryField, self).__init__(*args, **kwargs)
self.required = False
self.widget = forms.TextInput(
attrs={'size': 50, 'autocomplete': 'off'}
)
self.max_length = 300
self.label = _('update summary:')
self.help_text = _(
'enter a brief summary of your revision (e.g. '
'fixed spelling, grammar, improved style, this '
'field is optional)'
)
class EditorForm(forms.Form):
"""form with one field - `editor`
the field must be created dynamically, so it's added
in the __init__() function"""
def __init__(self, editor_attrs=None):
super(EditorForm, self).__init__()
editor_attrs = editor_attrs or {}
self.fields['editor'] = EditorField(editor_attrs=editor_attrs)
class DumpUploadForm(forms.Form):
"""This form handles importing
data into the forum. At the moment it only
supports stackexchange import.
"""
dump_file = forms.FileField()
class ShowExerciseForm(forms.Form):
"""Cleans data necessary to access problems and comments
by the respective comment or problem id - necessary
    when comments would normally be wrapped and/or displayed
    on a page other than the first page of problems to an exercise.
Same for the problems that are shown on the later pages.
"""
problem = forms.IntegerField(required=False)
comment = forms.IntegerField(required=False)
page = forms.IntegerField(required=False)
sort = forms.CharField(required=False)
def __init__(self, data, default_sort_method):
super(ShowExerciseForm, self).__init__(data)
self.default_sort_method = default_sort_method
def get_pruned_data(self):
nones = ('problem', 'comment', 'page')
for key in nones:
if key in self.cleaned_data:
if self.cleaned_data[key] is None:
del self.cleaned_data[key]
if 'sort' in self.cleaned_data:
if self.cleaned_data['sort'] == '':
del self.cleaned_data['sort']
return self.cleaned_data
def clean(self):
"""this form must always be valid
should use defaults if the data is incomplete
or invalid"""
if self._errors:
#since the form is always valid, clear the errors
logging.error(unicode(self._errors))
self._errors = {}
in_data = self.get_pruned_data()
out_data = dict()
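        #illustrative examples (hypothetical GET data): {'problem': 42}
        #takes the first branch (exactly one of problem/comment given)
        #and yields show_problem=42 sorted by votes, while {'page': 2} -
        #or data with both problem and comment - takes the else branch
        #and yields show_page=2 with the default sort method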
if ('problem' in in_data) ^ ('comment' in in_data):
out_data['show_page'] = None
out_data['problem_sort_method'] = 'votes'
out_data['show_comment'] = in_data.get('comment', None)
out_data['show_problem'] = in_data.get('problem', None)
else:
out_data['show_page'] = in_data.get('page', 1)
out_data['problem_sort_method'] = in_data.get(
'sort',
self.default_sort_method
)
out_data['show_comment'] = None
out_data['show_problem'] = None
self.cleaned_data = out_data
return out_data
class ChangeUserReputationForm(forms.Form):
"""Form that allows moderators and site administrators
to adjust reputation of users.
this form internally verifies that user who claims to
    be a moderator actually is
"""
user_reputation_delta = forms.IntegerField(
min_value=1,
label=_(
'Enter number of points to add or subtract'
)
)
comment = forms.CharField(max_length=128)
def clean_comment(self):
if 'comment' in self.cleaned_data:
comment = self.cleaned_data['comment'].strip()
if comment == '':
del self.cleaned_data['comment']
                raise forms.ValidationError('Please enter a non-empty comment')
self.cleaned_data['comment'] = comment
return comment
MODERATOR_STATUS_CHOICES = (
('a', _('approved')),
('w', _('watched')),
('s', _('suspended')),
('b', _('blocked')),
)
ADMINISTRATOR_STATUS_CHOICES = (('d', _('administrator')),
('m', _('moderator')), ) \
+ MODERATOR_STATUS_CHOICES
class ChangeUserStatusForm(forms.Form):
"""form that allows moderators to change user's status
the type of options displayed depend on whether user
is a moderator or a site administrator as well as
what is the current status of the moderated user
for example moderators cannot moderate other moderators
and admins. Admins can take away admin status, but cannot
add it (that can be done through the Django Admin interface
this form is to be displayed in the user profile under
"moderation" tab
"""
user_status = forms.ChoiceField(label=_('Change status to'))
def __init__(self, *arg, **kwarg):
moderator = kwarg.pop('moderator')
subject = kwarg.pop('subject')
super(ChangeUserStatusForm, self).__init__(*arg, **kwarg)
#select user_status_choices depending on status of the moderator
if moderator.is_administrator():
user_status_choices = ADMINISTRATOR_STATUS_CHOICES
elif moderator.is_moderator():
user_status_choices = MODERATOR_STATUS_CHOICES
if subject.is_moderator() and subject != moderator:
raise ValueError('moderator cannot moderate another moderator')
else:
raise ValueError('moderator or admin expected from "moderator"')
#remove current status of the "subject" user from choices
user_status_choices = filter_choices(
remove_choices=[subject.status, ],
from_choices=user_status_choices
)
#add prompt option
user_status_choices = (('select', _('which one?')), ) \
+ user_status_choices
self.fields['user_status'].choices = user_status_choices
#set prompt option as default
self.fields['user_status'].default = 'select'
self.moderator = moderator
self.subject = subject
def clean(self):
#if moderator is looking at own profile - do not
#let change status
if 'user_status' in self.cleaned_data:
user_status = self.cleaned_data['user_status']
#does not make sense to change own user status
#if necessary, this can be done from the Django admin interface
if self.moderator == self.subject:
del self.cleaned_data['user_status']
raise forms.ValidationError(_('Cannot change own status'))
            #do not let moderators turn other users into moderators;
            #'m' is the status code for moderator in the choices above
            if self.moderator.is_moderator() and user_status == 'm':
                del self.cleaned_data['user_status']
raise forms.ValidationError(
_('Cannot turn other user to moderator')
)
#do not allow moderator to change status of other moderators
if self.moderator.is_moderator() and self.subject.is_moderator():
del self.cleaned_data['user_status']
raise forms.ValidationError(
_('Cannot change status of another moderator')
)
#do not allow moderator to change to admin
if self.moderator.is_moderator() and user_status == 'd':
raise forms.ValidationError(
_("Cannot change status to admin")
)
if user_status == 'select':
del self.cleaned_data['user_status']
msg = _(
'If you wish to change %(username)s\'s status, '
'please make a meaningful selection.'
) % {'username': self.subject.username}
raise forms.ValidationError(msg)
return self.cleaned_data
class SendMessageForm(forms.Form):
subject_line = forms.CharField(
label=_('Subject line'),
max_length=64,
widget=forms.TextInput(attrs={'size': 64}, )
)
body_text = forms.CharField(
label=_('Message text'),
max_length=1600,
widget=forms.Textarea(attrs={'cols': 64})
)
class NotARobotForm(forms.Form):
recaptcha = RecaptchaField(
private_key=askbot_settings.RECAPTCHA_SECRET,
public_key=askbot_settings.RECAPTCHA_KEY
)
class FeedbackForm(forms.Form):
name = forms.CharField(label=_('Your name (optional):'), required=False)
email = forms.EmailField(label=_('Email:'), required=False)
message = forms.CharField(
label=_('Your message:'),
max_length=800,
widget=forms.Textarea(attrs={'cols': 60})
)
no_email = forms.BooleanField(
label=_("I don't want to give my email or receive a response:"),
required=False
)
next = NextUrlField()
def __init__(self, is_auth=False, *args, **kwargs):
super(FeedbackForm, self).__init__(*args, **kwargs)
self.is_auth = is_auth
if not is_auth:
if askbot_settings.USE_RECAPTCHA:
self._add_recaptcha_field()
def _add_recaptcha_field(self):
self.fields['recaptcha'] = RecaptchaField(
private_key=askbot_settings.RECAPTCHA_SECRET,
public_key=askbot_settings.RECAPTCHA_KEY
)
def clean(self):
super(FeedbackForm, self).clean()
if not self.is_auth:
if not self.cleaned_data['no_email'] \
and not self.cleaned_data['email']:
                msg = _('Please mark the "I don\'t want to give '
                        'my email" field.')
self._errors['email'] = self.error_class([msg])
return self.cleaned_data
class FormWithHideableFields(object):
    """allows to swap a field widget to HiddenInput() and back"""
    def hide_field(self, name):
        """replace widget with HiddenInput()
        and save the original in the _hidden_fields dictionary
        """
        #note: a single leading underscore is used on purpose - with
        #a double underscore the attribute name would be mangled, so
        #the hasattr() test below would never find it and the dict
        #would be re-created (losing saved widgets) on every call
        if not hasattr(self, '_hidden_fields'):
            self._hidden_fields = dict()
        if name in self._hidden_fields:
            return
        self._hidden_fields[name] = self.fields[name].widget
        self.fields[name].widget = forms.HiddenInput()
    def show_field(self, name):
        """restore the original widget on the field
        if it was previously hidden
        """
        if name in getattr(self, '_hidden_fields', {}):
            #restore the saved widget - not the whole field
            self.fields[name].widget = self._hidden_fields.pop(name)
class PostPrivatelyForm(forms.Form, FormWithHideableFields):
"""has a single field `post_privately` with
two related methods"""
post_privately = forms.BooleanField(
        label=_('keep private within your groups'),
        required=False
)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
self._user = user
super(PostPrivatelyForm, self).__init__(*args, **kwargs)
        if not self.allows_post_privately():
self.hide_field('post_privately')
def allows_post_privately(self):
user = self._user
return (
user and user.is_authenticated() and \
user.can_make_group_private_posts()
)
def clean_post_privately(self):
        if not self.allows_post_privately():
self.cleaned_data['post_privately'] = False
return self.cleaned_data['post_privately']
class DraftExerciseForm(forms.Form):
"""No real validation required for this form"""
title = forms.CharField(required=False)
text = forms.CharField(required=False)
tagnames = forms.CharField(required=False)
class DraftProblemForm(forms.Form):
"""Only thread_id is required"""
thread_id = forms.IntegerField()
text = forms.CharField(required=False)
class PostAsSomeoneForm(forms.Form):
post_author_username = forms.CharField(
initial=_('User name:'),
help_text=_(
'Enter name to post on behalf of someone else. '
'Can create new accounts.'
),
required=False,
widget=forms.TextInput()
)
post_author_email = forms.CharField(
initial=_('Email address:'),
required=False,
widget=forms.TextInput(attrs={'class': 'tipped-input'})
)
def get_post_user(self, user):
"""returns user on whose behalf the post or a revision
is being made
"""
username = self.cleaned_data['post_author_username']
        email = self.cleaned_data['post_author_email']
if user.is_administrator() and username and email:
post_user = user.get_or_create_fake_user(username, email)
else:
post_user = user
return post_user
def clean_post_author_username(self):
"""if value is the same as initial, it is reset to
empty string
todo: maybe better to have field where initial value is invalid,
then we would not have to have two almost identical clean functions?
"""
username = self.cleaned_data.get('post_author_username', '')
initial_username = unicode(self.fields['post_author_username'].initial)
if username == initial_username:
self.cleaned_data['post_author_username'] = ''
return self.cleaned_data['post_author_username']
def clean_post_author_email(self):
"""if value is the same as initial, it is reset to
empty string"""
email = self.cleaned_data.get('post_author_email', '')
initial_email = unicode(self.fields['post_author_email'].initial)
if email == initial_email:
email = ''
if email != '':
email = forms.EmailField().clean(email)
self.cleaned_data['post_author_email'] = email
return email
def clean(self):
"""requires email address if user name is given"""
username = self.cleaned_data.get('post_author_username', '')
email = self.cleaned_data.get('post_author_email', '')
if username == '' and email:
username_errors = self._errors.get(
'post_author_username',
ErrorList()
)
username_errors.append(_('User name is required with the email'))
self._errors['post_author_username'] = username_errors
raise forms.ValidationError('missing user name')
elif email == '' and username:
email_errors = self._errors.get('post_author_email', ErrorList())
email_errors.append(_('Email is required if user name is added'))
self._errors['post_author_email'] = email_errors
raise forms.ValidationError('missing email')
return self.cleaned_data
class AskForm(PostAsSomeoneForm, PostPrivatelyForm):
"""the form used to askbot exercises
field ask_anonymously is shown to the user if the
if ALLOW_ASK_ANONYMOUSLY live setting is True
however, for simplicity, the value will always be present
in the cleaned data, and will evaluate to False if the
settings forbids anonymous asking
"""
title = TitleField()
tags = TagNamesField()
wiki = WikiField()
    group_id = forms.IntegerField(required=False, widget=forms.HiddenInput)
ask_anonymously = forms.BooleanField(
label=_('ask anonymously'),
help_text=_(
'Check if you do not want to reveal your name '
'when asking this exercise'
),
required=False,
)
openid = forms.CharField(
required=False, max_length=255,
widget=forms.TextInput(attrs={'size': 40, 'class': 'openid-input'})
)
def __init__(self, *args, **kwargs):
super(AskForm, self).__init__(*args, **kwargs)
#it's important that this field is set up dynamically
self.fields['text'] = ExerciseEditorField()
#hide ask_anonymously field
if askbot_settings.ALLOW_ASK_ANONYMOUSLY is False:
self.hide_field('ask_anonymously')
def clean_ask_anonymously(self):
"""returns false if anonymous asking is not allowed
"""
if askbot_settings.ALLOW_ASK_ANONYMOUSLY is False:
self.cleaned_data['ask_anonymously'] = False
return self.cleaned_data['ask_anonymously']
ASK_BY_EMAIL_SUBJECT_HELP = _(
'Subject line is expected in the format: '
'[tag1, tag2, tag3,...] exercise title'
)
#widgetforms
class AskWidgetForm(forms.Form, FormWithHideableFields):
'''Simple form with just the title to add an exercise'''
title = TitleField()
ask_anonymously = forms.BooleanField(
label=_('ask anonymously'),
help_text=_(
'Check if you do not want to reveal your name '
'when asking this exercise'
),
required=False,
)
def __init__(self, include_text=True, *args, **kwargs):
super(AskWidgetForm, self).__init__(*args, **kwargs)
#hide ask_anonymously field
if not askbot_settings.ALLOW_ASK_ANONYMOUSLY:
self.hide_field('ask_anonymously')
self.fields['text'] = ExerciseEditorField()
if not include_text:
self.hide_field('text')
#hack to make it validate
self.fields['text'].required = False
self.fields['text'].min_length = 0
class CreateAskWidgetForm(forms.Form, FormWithHideableFields):
title = forms.CharField(max_length=100)
include_text_field = forms.BooleanField(required=False)
inner_style = forms.CharField(
widget=forms.Textarea,
required=False
)
outer_style = forms.CharField(
widget=forms.Textarea,
required=False
)
def __init__(self, *args, **kwargs):
from askbot.models import Tag
super(CreateAskWidgetForm, self).__init__(*args, **kwargs)
self.fields['group'] = forms.ModelChoiceField(
queryset=get_groups().exclude_personal(),
required=False
)
        self.fields['tag'] = forms.ModelChoiceField(
            queryset=Tag.objects.get_content_tags(),
            required=False
        )
if not askbot_settings.GROUPS_ENABLED:
self.hide_field('group')
class CreateExerciseWidgetForm(forms.Form, FormWithHideableFields):
title = forms.CharField(max_length=100)
exercise_number = forms.CharField(initial='7')
tagnames = forms.CharField(label=_('tags'), max_length=50)
search_query = forms.CharField(max_length=50, required=False)
order_by = forms.ChoiceField(
choices=const.SEARCH_ORDER_BY,
initial='-added_at'
)
style = forms.CharField(
widget=forms.Textarea,
initial=const.DEFAULT_EXERCISE_WIDGET_STYLE,
required=False
)
def __init__(self, *args, **kwargs):
super(CreateExerciseWidgetForm, self).__init__(*args, **kwargs)
self.fields['tagnames'] = TagNamesField()
self.fields['group'] = forms.ModelChoiceField(
queryset=get_groups().exclude(name__startswith='_internal'),
required=False
)
class AskByEmailForm(forms.Form):
""":class:`~askbot.forms.AskByEmailForm`
validates exercise data, where exercise was posted
by email.
    It is invoked by the management command
:mod:`~askbot.management.commands.post_emailed_exercises`
Input is text data with attributes:
* :attr:`~askbot.forms.AskByEmailForm.sender` - unparsed "from" data
* :attr:`~askbot.forms.AskByEmailForm.subject` - subject line
* :attr:`~askbot.forms.AskByEmailForm.body_text` - body text of the email
Cleaned values are:
* ``email`` - email address
* ``title`` - exercise title
* ``tagnames`` - tag names all in one string
* ``body_text`` - body of exercise text -
a pass-through, no extra validation
"""
sender = forms.CharField(max_length=255)
subject = forms.CharField(
max_length=255,
error_messages={
'required': ASK_BY_EMAIL_SUBJECT_HELP
}
)
body_text = ExerciseEditorField()
def clean_sender(self):
"""Cleans the :attr:`~askbot.forms.AskByEmail.sender` attribute
If the field is valid, cleaned data will receive value ``email``
"""
raw_email = self.cleaned_data['sender']
email = extract_first_email_address(raw_email)
if email is None:
raise forms.ValidationError('Could not extract email address')
self.cleaned_data['email'] = email
return self.cleaned_data['sender']
def clean_subject(self):
"""Cleans the :attr:`~askbot.forms.AskByEmail.subject` attribute
If the field is valid, cleaned data will receive values
``tagnames`` and ``title``
"""
raw_subject = self.cleaned_data['subject'].strip()
if askbot_settings.TAGS_ARE_REQUIRED:
subject_re = re.compile(r'^\[([^]]+)\](.*)$')
else:
subject_re = re.compile(r'^(?:\[([^]]+)\])?(.*)$')
match = subject_re.match(raw_subject)
if match:
#make raw tags comma-separated
if match.group(1) is None: # no tags
self.cleaned_data['tagnames'] = ''
else:
tagnames = match.group(1).replace(';', ',')
#pre-process tags
tag_list = [tag.strip() for tag in tagnames.split(',')]
tag_list = [re.sub(r'\s+', ' ', tag) for tag in tag_list]
if askbot_settings.REPLACE_SPACE_WITH_DASH_IN_EMAILED_TAGS:
tag_list = [tag.replace(' ', '-') for tag in tag_list]
#todo: use tag separator char here
tagnames = ' '.join(tag_list)
#clean tags - may raise ValidationError
self.cleaned_data['tagnames'] = TagNamesField().clean(tagnames)
#clean title - may raise ValidationError
title = match.group(2).strip()
self.cleaned_data['title'] = TitleField().clean(title)
else:
raise forms.ValidationError(ASK_BY_EMAIL_SUBJECT_HELP)
return self.cleaned_data['subject']
class ProblemForm(PostAsSomeoneForm, PostPrivatelyForm):
text = ProblemEditorField()
wiki = WikiField()
openid = forms.CharField(
required=False, max_length=255,
widget=forms.TextInput(attrs={'size': 40, 'class': 'openid-input'})
)
email_notify = EmailNotifyField(initial=False)
def __init__(self, *args, **kwargs):
super(ProblemForm, self).__init__(*args, **kwargs)
self.fields['text'] = ProblemEditorField()
self.fields['email_notify'].widget.attrs['id'] = \
'exercise-subscribe-updates'
class SolutionForm(PostAsSomeoneForm, PostPrivatelyForm):
text = SolutionEditorField()
wiki = WikiField()
openid = forms.CharField(
required=False, max_length=255,
widget=forms.TextInput(attrs={'size': 40, 'class': 'openid-input'})
)
email_notify = EmailNotifyField(initial=False)
def __init__(self, *args, **kwargs):
super(SolutionForm, self).__init__(*args, **kwargs)
self.fields['text'] = SolutionEditorField()
self.fields['email_notify'].widget.attrs['id'] = \
'exercise-subscribe-updates'
class VoteForm(forms.Form):
"""form used in ajax vote view (only comment_upvote so far)
"""
post_id = forms.IntegerField()
# char because it is 'true' or 'false' as string
cancel_vote = forms.CharField()
def clean_cancel_vote(self):
val = self.cleaned_data['cancel_vote']
if val == 'true':
result = True
elif val == 'false':
result = False
else:
del self.cleaned_data['cancel_vote']
raise forms.ValidationError(
'either "true" or "false" strings expected'
)
self.cleaned_data['cancel_vote'] = result
return self.cleaned_data['cancel_vote']
class CloseForm(forms.Form):
reason = forms.ChoiceField(choices=const.CLOSE_REASONS)
class RetagExerciseForm(forms.Form):
tags = TagNamesField()
def __init__(self, exercise, *args, **kwargs):
"""initialize the default values"""
super(RetagExerciseForm, self).__init__(*args, **kwargs)
self.fields['tags'].initial = exercise.thread.tagnames
class RevisionForm(forms.Form):
"""
    Lists revisions of an Exercise or Problem
"""
revision = forms.ChoiceField(
widget=forms.Select(
attrs={'style': 'width:520px'}
)
)
def __init__(self, post, latest_revision, *args, **kwargs):
super(RevisionForm, self).__init__(*args, **kwargs)
revisions = post.revisions.values_list(
'revision', 'author__username', 'revised_at', 'summary'
)
date_format = '%c'
rev_choices = list()
for r in revisions:
rev_details = u'%s - %s (%s) %s' % (
r[0], r[1], r[2].strftime(date_format), r[3]
)
rev_choices.append((r[0], rev_details))
self.fields['revision'].choices = rev_choices
self.fields['revision'].initial = latest_revision.revision
class EditExerciseForm(PostAsSomeoneForm, PostPrivatelyForm):
title = TitleField()
tags = TagNamesField()
summary = SummaryField()
wiki = WikiField()
reveal_identity = forms.BooleanField(
help_text=_(
'You have asked this exercise anonymously, '
'if you decide to reveal your identity, please check '
'this box.'
),
label=_('reveal identity'),
required=False,
)
#todo: this is odd that this form takes exercise as an argument
def __init__(self, *args, **kwargs):
"""populate EditExerciseForm with initial data"""
self.exercise = kwargs.pop('exercise')
self.user = kwargs['user']#preserve for superclass
revision = kwargs.pop('revision')
super(EditExerciseForm, self).__init__(*args, **kwargs)
#it is important to add this field dynamically
self.fields['text'] = ExerciseEditorField()
self.fields['title'].initial = revision.title
self.fields['text'].initial = revision.text
self.fields['tags'].initial = revision.tagnames
self.fields['wiki'].initial = self.exercise.wiki
#hide the reveal identity field
if not self.can_stay_anonymous():
self.hide_field('reveal_identity')
def has_changed(self):
if super(EditExerciseForm, self).has_changed():
return True
if askbot_settings.GROUPS_ENABLED:
return self.exercise.is_private() \
!= self.cleaned_data['post_privately']
else:
return False
def can_stay_anonymous(self):
"""determines if the user cat keep editing the exercise
anonymously"""
return (askbot_settings.ALLOW_ASK_ANONYMOUSLY
and self.exercise.is_anonymous
and self.user.is_owner_of(self.exercise)
)
def clean_reveal_identity(self):
"""cleans the reveal_identity field
which determines whether previous anonymous
edits must be rewritten as not anonymous
this does not necessarily mean that the edit will be anonymous
only does real work when exercise is anonymous
based on the following truth table:
is_anon can owner checked cleaned data
- * * * False (ignore choice in checkbox)
+ + + + True
+ + + - False
+ + - + Raise(Not owner)
+ + - - False
+ - + + True (setting "can" changed, say yes)
+ - + - False, warn (but prev edits stay anon)
+ - - + Raise(Not owner)
+ - - - False
"""
value = self.cleaned_data['reveal_identity']
if self.exercise.is_anonymous:
if value is True:
if self.user.is_owner_of(self.exercise):
#regardless of the ALLOW_ASK_ANONYMOUSLY
return True
else:
self.show_field('reveal_identity')
del self.cleaned_data['reveal_identity']
raise forms.ValidationError(
_(
'Sorry, only owner of the anonymous '
'exercise can reveal his or her '
'identity, please uncheck the '
'box'
)
)
else:
can_ask_anon = askbot_settings.ALLOW_ASK_ANONYMOUSLY
is_owner = self.user.is_owner_of(self.exercise)
if can_ask_anon is False and is_owner:
self.show_field('reveal_identity')
raise forms.ValidationError(
_(
'Sorry, apparently rules have just changed - '
'it is no longer possible to ask anonymously. '
'Please either check the "reveal identity" box '
'or reload this page and try editing the exercise '
'again.'
)
)
return False
else:
#takes care of 8 possibilities - first row of the table
return False
def clean(self):
"""Purpose of this function is to determine whether
it is ok to apply edit anonymously in the synthetic
field edit_anonymously. It relies on correct cleaning
if the "reveal_identity" field
"""
super(EditExerciseForm, self).clean()
reveal_identity = self.cleaned_data.get('reveal_identity', False)
stay_anonymous = False
if reveal_identity is False and self.can_stay_anonymous():
stay_anonymous = True
self.cleaned_data['stay_anonymous'] = stay_anonymous
return self.cleaned_data
class EditProblemForm(PostAsSomeoneForm, PostPrivatelyForm):
summary = SummaryField()
wiki = WikiField()
def __init__(self, problem, revision, *args, **kwargs):
self.problem = problem
super(EditProblemForm, self).__init__(*args, **kwargs)
#it is important to add this field dynamically
self.fields['text'] = ProblemEditorField()
self.fields['text'].initial = revision.text
self.fields['wiki'].initial = problem.wiki
def has_changed(self):
#todo: this function is almost copy/paste of EditExerciseForm.has_changed()
if super(EditProblemForm, self).has_changed():
return True
if askbot_settings.GROUPS_ENABLED:
return self.problem.is_private() \
!= self.cleaned_data['post_privately']
else:
return False
class EditTagWikiForm(forms.Form):
text = forms.CharField(required=False)
tag_id = forms.IntegerField()
class EditUserForm(forms.Form):
email = forms.EmailField(
label=u'Email',
required=True,
max_length=255,
widget=forms.TextInput(attrs={'size': 35})
)
realname = forms.CharField(
label=_('Real name'),
required=False,
max_length=255,
widget=forms.TextInput(attrs={'size': 35})
)
website = forms.URLField(
label=_('Website'),
required=False,
max_length=255,
widget=forms.TextInput(attrs={'size': 35})
)
city = forms.CharField(
label=_('City'),
required=False,
max_length=255,
widget=forms.TextInput(attrs={'size': 35})
)
country = CountryField(required=False)
show_country = forms.BooleanField(
label=_('Show country'),
required=False
)
show_marked_tags = forms.BooleanField(
label=_('Show tag choices'),
required=False
)
birthday = forms.DateField(
label=_('Date of birth'),
help_text=_(
'will not be shown, used to calculate '
'age, format: YYYY-MM-DD'
),
required=False,
widget=forms.TextInput(attrs={'size': 35})
)
about = forms.CharField(
label=_('Profile'),
required=False,
widget=forms.Textarea(attrs={'cols': 60})
)
def __init__(self, user, *args, **kwargs):
super(EditUserForm, self).__init__(*args, **kwargs)
logging.debug('initializing the form')
if askbot_settings.EDITABLE_SCREEN_NAME:
self.fields['username'] = UserNameField(label=_('Screen name'))
self.fields['username'].initial = user.username
self.fields['username'].user_instance = user
self.fields['email'].initial = user.email
self.fields['realname'].initial = user.real_name
self.fields['website'].initial = user.website
self.fields['city'].initial = user.location
if user.country is None:
country = 'unknown'
else:
country = user.country
self.fields['country'].initial = country
self.fields['show_country'].initial = user.show_country
self.fields['show_marked_tags'].initial = user.show_marked_tags
if user.date_of_birth is not None:
self.fields['birthday'].initial = user.date_of_birth
self.fields['about'].initial = user.about
self.user = user
def clean_email(self):
"""For security reason one unique email in database"""
if self.user.email != self.cleaned_data['email']:
#todo dry it, there is a similar thing in openidauth
if askbot_settings.EMAIL_UNIQUE is True:
if 'email' in self.cleaned_data:
try:
User.objects.get(email=self.cleaned_data['email'])
except User.DoesNotExist:
return self.cleaned_data['email']
except User.MultipleObjectsReturned:
raise forms.ValidationError(_(
'this email has already been registered, '
'please use another one')
)
raise forms.ValidationError(_(
'this email has already been registered, '
'please use another one')
)
return self.cleaned_data['email']
class TagFilterSelectionForm(forms.ModelForm):
email_tag_filter_strategy = forms.ChoiceField(
        initial=const.EXCLUDE_IGNORED,
        label=_('Choose email tag filter'),
        widget=forms.RadioSelect
)
def __init__(self, *args, **kwargs):
super(TagFilterSelectionForm, self).__init__(*args, **kwargs)
choices = get_tag_display_filter_strategy_choices()
self.fields['email_tag_filter_strategy'].choices = choices
class Meta:
model = User
fields = ('email_tag_filter_strategy',)
def save(self):
before = self.instance.email_tag_filter_strategy
super(TagFilterSelectionForm, self).save()
after = self.instance.email_tag_filter_strategy
if before != after:
return True
return False
class EmailFeedSettingField(forms.ChoiceField):
def __init__(self, *arg, **kwarg):
kwarg['choices'] = const.NOTIFICATION_DELIVERY_SCHEDULE_CHOICES
kwarg['widget'] = forms.RadioSelect
super(EmailFeedSettingField, self).__init__(*arg, **kwarg)
class EditUserEmailFeedsForm(forms.Form):
FORM_TO_MODEL_MAP = {
'all_exercises': 'q_all',
'asked_by_me': 'q_ask',
'problemed_by_me': 'q_ans',
'individually_selected': 'q_sel',
'mentions_and_comments': 'm_and_c',
}
NO_EMAIL_INITIAL = {
'all_exercises': 'n',
'asked_by_me': 'n',
'problemed_by_me': 'n',
'individually_selected': 'n',
'mentions_and_comments': 'n',
}
INSTANT_EMAIL_INITIAL = {
'all_exercises': 'i',
'asked_by_me': 'i',
'problemed_by_me': 'i',
'individually_selected': 'i',
'mentions_and_comments': 'i',
}
asked_by_me = EmailFeedSettingField(
label=_('Exercises submitted by me')
)
problemed_by_me = EmailFeedSettingField(
label=_('Problems submitted by me')
)
individually_selected = EmailFeedSettingField(
label=_('Individually selected')
)
all_exercises = EmailFeedSettingField(
label=_('Entire forum (tag filtered)'),
)
mentions_and_comments = EmailFeedSettingField(
label=_('Comments and posts mentioning me'),
)
def set_initial_values(self, user=None):
from askbot import models
KEY_MAP = dict([(v, k) for k, v in self.FORM_TO_MODEL_MAP.iteritems()])
if user is not None:
settings = models.EmailFeedSetting.objects.filter(subscriber=user)
initial_values = {}
for setting in settings:
feed_type = setting.feed_type
form_field = KEY_MAP[feed_type]
frequency = setting.frequency
initial_values[form_field] = frequency
self.initial = initial_values
return self
def reset(self):
"""equivalent to set_frequency('n')
but also returns self due to some legacy requirement
todo: clean up use of this function
"""
if self.is_bound:
self.cleaned_data = self.NO_EMAIL_INITIAL
self.initial = self.NO_EMAIL_INITIAL
return self
def get_db_model_subscription_type_names(self):
"""todo: refactor this - too hacky
should probably use model form instead
returns list of values acceptable in
``attr::models.user.EmailFeedSetting.feed_type``
"""
return self.FORM_TO_MODEL_MAP.values()
def set_frequency(self, frequency='n'):
data = {
'all_exercises': frequency,
'asked_by_me': frequency,
'problemed_by_me': frequency,
'individually_selected': frequency,
'mentions_and_comments': frequency
}
if self.is_bound:
self.cleaned_data = data
self.initial = data
def save(self, user, save_unbound=False):
"""with save_unbound==True will bypass form
validation and save initial values
"""
from askbot import models
changed = False
for form_field, feed_type in self.FORM_TO_MODEL_MAP.items():
s, created = models.EmailFeedSetting.objects.get_or_create(
subscriber=user,
feed_type=feed_type
)
if save_unbound:
#just save initial values instead
if form_field in self.initial:
new_value = self.initial[form_field]
else:
new_value = self.fields[form_field].initial
else:
new_value = self.cleaned_data[form_field]
if s.frequency != new_value:
s.frequency = new_value
s.save()
changed = True
else:
if created:
s.save()
if form_field == 'individually_selected':
user.followed_threads.clear()
return changed
class SubscribeForEmailUpdatesField(forms.ChoiceField):
"""a simple yes or no field to subscribe for email or not"""
def __init__(self, **kwargs):
kwargs['widget'] = forms.widgets.RadioSelect
kwargs['error_messages'] = {
'required': _('please choose one of the options above')
}
kwargs['choices'] = (
('y', _('okay, let\'s try!')),
(
'n',
_('no %(sitename)s email please, thanks')
% {'sitename': askbot_settings.APP_SHORT_NAME}
)
)
super(SubscribeForEmailUpdatesField, self).__init__(**kwargs)
class SimpleEmailSubscribeForm(forms.Form):
subscribe = SubscribeForEmailUpdatesField()
def save(self, user=None):
EFF = EditUserEmailFeedsForm
#here we have kind of an anomaly - the value 'y' is redundant
#with the frequency variable - needs to be fixed
if self.is_bound and self.cleaned_data['subscribe'] == 'y':
email_settings_form = EFF()
email_settings_form.set_initial_values(user)
logging.debug('%s wants to subscribe' % user.username)
else:
email_settings_form = EFF(initial=EFF.NO_EMAIL_INITIAL)
email_settings_form.save(user, save_unbound=True)
class GroupLogoURLForm(forms.Form):
"""form for saving group logo url"""
group_id = forms.IntegerField()
image_url = forms.CharField()
class EditGroupMembershipForm(forms.Form):
"""a form for adding or removing users
to and from user groups"""
user_id = forms.IntegerField()
group_name = forms.CharField()
action = forms.CharField()
def clean_action(self):
"""allowed actions are 'add' and 'remove'"""
action = self.cleaned_data['action']
if action not in ('add', 'remove'):
del self.cleaned_data['action']
raise forms.ValidationError('invalid action')
return action
class EditRejectReasonForm(forms.Form):
reason_id = forms.IntegerField(required=False)
title = CountedWordsField(
min_words=1, max_words=4, field_name=_('Title')
)
details = CountedWordsField(
min_words=6, field_name=_('Description')
)
class ModerateTagForm(forms.Form):
tag_id = forms.IntegerField()
    thread_id = forms.IntegerField(required=False)
action = forms.CharField()
def clean_action(self):
action = self.cleaned_data['action']
assert(action in ('accept', 'reject'))
return action
class ShareExerciseForm(forms.Form):
thread_id = forms.IntegerField()
recipient_name = forms.CharField()
|
mxOBS/deb-pkg_trusty_chromium-browser | refs/heads/master | tools/cygprofile/symbolize.py | 9 | #!/usr/bin/python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Symbolize log file produced by cypgofile instrumentation.
Given a log file and the binary being profiled (e.g. executable, shared
library), the script can produce three different outputs: 1) symbols for the
addresses, 2) function and line numbers for the addresses, or 3) an order file.
"""
import optparse
import os
import string
import subprocess
import sys
def ParseLogLines(log_file_lines):
"""Parse a log file produced by the profiled run of clank.
  Args:
    log_file_lines: array of lines in log file produced by profiled run
Below is an example of a small log file:
5086e000-52e92000 r-xp 00000000 b3:02 51276 libchromeview.so
secs usecs pid:threadid func
START
1314897086 795828 3587:1074648168 0x509e105c
1314897086 795874 3587:1074648168 0x509e0eb4
1314897086 796326 3587:1074648168 0x509e0e3c
1314897086 796552 3587:1074648168 0x509e07bc
END
Returns:
call_info list with list of tuples of the format (sec, usec, call id,
function address called)
"""
call_lines = []
vm_start = 0
line = log_file_lines[0]
assert("r-xp" in line)
end_index = line.find('-')
vm_start = int(line[:end_index], 16)
for line in log_file_lines[2:]:
# print hex(vm_start)
fields = line.split()
if len(fields) == 4:
call_lines.append(fields)
# Convert strings to int in fields.
call_info = []
for call_line in call_lines:
(sec_timestamp, usec_timestamp) = map(int, call_line[0:2])
callee_id = call_line[2]
addr = int(call_line[3], 16)
if vm_start < addr:
addr -= vm_start
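      # e.g. with the docstring's example log, raw address 0x509e105c
      # becomes 0x509e105c - 0x5086e000 = 0x17305c (illustrative arithmetic)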
call_info.append((sec_timestamp, usec_timestamp, callee_id, addr))
return call_info
def GetStdOutputLines(cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = p.communicate()[0]
return output.split('\n')
def ParseLibSymbols(lib_file):
"""Get output from running nm and greping for text symbols.
Args:
lib_file: the library or executable that contains the profiled code
Returns:
list of sorted unique addresses and corresponding size of function symbols
in lib_file and map of addresses to all symbols at a particular address
"""
cmd = ['nm', '-S', '-n', lib_file]
nm_lines = GetStdOutputLines(cmd)
nm_symbols = []
for nm_line in nm_lines:
    # t/T mark local/global text (code) symbols and W marks weak symbols
    # in nm output; 'marker' avoids shadowing the str builtin
    if any(marker in nm_line for marker in (' t ', ' W ', ' T ')):
nm_symbols.append(nm_line)
nm_index = 0
unique_addrs = []
address_map = {}
while nm_index < len(nm_symbols):
# If the length of the split line is not 4, then it does not contain all the
# information needed to symbolize (i.e. address, size and symbol name).
if len(nm_symbols[nm_index].split()) == 4:
(addr, size) = [int(x, 16) for x in nm_symbols[nm_index].split()[0:2]]
      # Multiple symbols may be at the same address. This is due to aliasing
      # done by the compiler. Since there is no way to be sure which one was
      # called in the profiled run, we symbolize to include all symbol names
      # at a particular address.
fnames = []
while (nm_index < len(nm_symbols) and
addr == int(nm_symbols[nm_index].split()[0], 16)):
if len(nm_symbols[nm_index].split()) == 4:
fnames.append(nm_symbols[nm_index].split()[3])
nm_index += 1
address_map[addr] = fnames
unique_addrs.append((addr, size))
else:
nm_index += 1
return (unique_addrs, address_map)
class SymbolNotFoundException(Exception):
  def __init__(self, value):
    super(SymbolNotFoundException, self).__init__(value)
    self.value = value
def __str__(self):
return repr(self.value)
def BinarySearchAddresses(addr, start, end, arr):
"""Find starting address of a symbol at a particular address.
The reason we can not directly use the address provided by the log file is
that the log file may give an address after the start of the symbol. The
  logged address is often one byte after the start. Using this search
  function, rather than just subtracting one from the logged address, allows
  the logging instrumentation to log any address in a function.
Args:
addr: the address being searched for
start: the starting index for the binary search
end: the ending index for the binary search
arr: the list being searched containing tuple of address and size
Returns:
the starting address of the symbol at address addr
  Raises:
    SymbolNotFoundException: if address not found. The function expects
    all logged addresses to be found.
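  Example (illustrative symbol table): with
    arr = [(0x1000, 0x20), (0x1020, 0x10)],
  BinarySearchAddresses(0x1004, 0, 1, arr) returns 0x1000, because
  0x1004 falls inside the range [0x1000, 0x1000 + 0x20).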
"""
# print "addr: " + str(addr) + " start: " + str(start) + " end: " + str(end)
if start >= end or start == end - 1:
# arr[i] is a tuple of address and size. Check if addr inside range
if addr >= arr[start][0] and addr < arr[start][0] + arr[start][1]:
return arr[start][0]
elif addr >= arr[end][0] and addr < arr[end][0] + arr[end][1]:
return arr[end][0]
else:
raise SymbolNotFoundException(addr)
else:
halfway = (start + end) / 2
(nm_addr, size) = arr[halfway]
# print "nm_addr: " + str(nm_addr) + " halfway: " + str(halfway)
if addr >= nm_addr and addr < nm_addr + size:
return nm_addr
elif addr < nm_addr:
return BinarySearchAddresses(addr, start, halfway-1, arr)
else:
# Condition (addr >= nm_addr + size) must be true.
return BinarySearchAddresses(addr, halfway+1, end, arr)
def FindFunctions(addr, unique_addrs, address_map):
"""Find function symbol names at address addr."""
return address_map[BinarySearchAddresses(addr, 0, len(unique_addrs) - 1,
unique_addrs)]
def AddrToLine(addr, lib_file):
"""Use addr2line to determine line info of a particular address."""
cmd = ['addr2line', '-f', '-e', lib_file, hex(addr)]
output = GetStdOutputLines(cmd)
assert(len(output) == 2)
return ':'.join(output)
def GetObjectFileNames(obj_dir):
""" Gets the list of object files in the output directory. """
obj_files = []
for (dirpath, _, filenames) in os.walk(obj_dir):
for file_name in filenames:
if file_name.endswith('.o'):
obj_files.append(os.path.join(dirpath, file_name))
return obj_files
class WarningCollector(object):
def __init__(self, max_warnings):
self._warnings = 0
self._max_warnings = max_warnings
def Write(self, message):
if self._warnings < self._max_warnings:
sys.stderr.write(message + '\n')
self._warnings += 1
def WriteEnd(self, message):
if self._warnings > self._max_warnings:
sys.stderr.write(str(self._warnings - self._max_warnings) +
' more warnings for: ' + message + '\n')
def SymbolToSection(obj_dir):
""" Gets a mapping from symbol to linker section name by scanning all
of the object files. """
object_files = GetObjectFileNames(obj_dir)
symbol_to_section_map = {}
symbol_warnings = WarningCollector(300)
for obj_file in object_files:
cmd = ['objdump', '-w', '-t', obj_file]
symbol_lines = GetStdOutputLines(cmd)
for symbol_line in symbol_lines:
items = symbol_line.split()
# All of the symbol lines we care about are in the form
# 0000000000 g F .text.foo 000000000 [.hidden] foo
# where g (global) might also be l (local) or w (weak).
if len(items) > 4 and items[2] == 'F':
# This symbol is a function
symbol = items[len(items) - 1]
if symbol.startswith('.LTHUNK'):
continue
section = items[3]
if ((symbol in symbol_to_section_map) and
(symbol_to_section_map[symbol] != section)):
symbol_warnings.Write('WARNING: Symbol ' + symbol +
' in conflicting sections ' + section +
' and ' + symbol_to_section_map[symbol])
elif not section.startswith('.text.'):
symbol_warnings.Write('WARNING: Symbol ' + symbol +
' in incorrect section ' + section)
else:
symbol_to_section_map[symbol] = section
symbol_warnings.WriteEnd('bad sections')
return symbol_to_section_map
def main():
"""Write output for profiled run to standard out.
The format of the output depends on the output type specified as the third
command line argument. The default output type is to symbolize the addresses
of the functions called.
"""
parser = optparse.OptionParser('usage: %prog [options] log_file lib_file')
parser.add_option('-t', '--outputType', dest='output_type',
default='symbolize', type='string',
help='lineize or symbolize or orderfile')
# Option for output type. The log file and lib file arguments are required
# by the script and therefore are not options.
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('expected 2 args: log_file lib_file')
(log_file, lib_file) = args
output_type = options.output_type
obj_dir = os.path.abspath(os.path.join(os.path.dirname(lib_file), '../obj'))
log_file_lines = map(string.rstrip, open(log_file).readlines())
call_info = ParseLogLines(log_file_lines)
(unique_addrs, address_map) = ParseLibSymbols(lib_file)
# Check for duplicate addresses in the log file, and print a warning if
# duplicates are found. The instrumentation that produces the log file
# should only print the first time a function is entered.
addr_list = []
for call in call_info:
addr = call[3]
if addr not in addr_list:
addr_list.append(addr)
else:
print('WARNING: Address ' + hex(addr) + ' (line= ' +
AddrToLine(addr, lib_file) + ') already profiled.')
symbol_to_section_map = SymbolToSection(obj_dir)
unknown_symbol_warnings = WarningCollector(300)
symbol_not_found_warnings = WarningCollector(300)
for call in call_info:
addr = call[3]
if output_type == 'lineize':
symbol = AddrToLine(addr, lib_file)
print(str(call[0]) + ' ' + str(call[1]) + '\t' + str(call[2]) + '\t'
+ symbol)
elif output_type == 'orderfile':
try:
symbols = FindFunctions(addr, unique_addrs, address_map)
for symbol in symbols:
if symbol in symbol_to_section_map:
print symbol_to_section_map[symbol]
else:
unknown_symbol_warnings.Write(
'WARNING: No known section for symbol ' + symbol)
print ''
except SymbolNotFoundException:
symbol_not_found_warnings.Write(
'WARNING: Did not find function in binary. addr: '
+ hex(addr))
else:
try:
symbols = FindFunctions(addr, unique_addrs, address_map)
print(str(call[0]) + ' ' + str(call[1]) + '\t' + str(call[2]) + '\t'
+ symbols[0])
first_symbol = True
for symbol in symbols:
if not first_symbol:
print '\t\t\t\t\t' + symbol
else:
first_symbol = False
except SymbolNotFoundException:
symbol_not_found_warnings.Write(
'WARNING: Did not find function in binary. addr: '
+ hex(addr))
unknown_symbol_warnings.WriteEnd('no known section for symbol')
symbol_not_found_warnings.WriteEnd('did not find function')
if __name__ == '__main__':
main()
|
pipsiscool/audacity | refs/heads/master | lib-src/lv2/serd/waflib/TaskGen.py | 62 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import copy,re,os
from waflib import Task,Utils,Logs,Errors,ConfigSet,Node
feats=Utils.defaultdict(set)
class task_gen(object):
mappings={}
prec=Utils.defaultdict(list)
def __init__(self,*k,**kw):
self.source=''
self.target=''
self.meths=[]
self.prec=Utils.defaultdict(list)
self.mappings={}
self.features=[]
self.tasks=[]
if not'bld'in kw:
self.env=ConfigSet.ConfigSet()
self.idx=0
self.path=None
else:
self.bld=kw['bld']
self.env=self.bld.env.derive()
self.path=self.bld.path
try:
self.idx=self.bld.idx[id(self.path)]=self.bld.idx.get(id(self.path),0)+1
except AttributeError:
self.bld.idx={}
self.idx=self.bld.idx[id(self.path)]=1
for key,val in kw.items():
setattr(self,key,val)
def __str__(self):
return"<task_gen %r declared in %s>"%(self.name,self.path.abspath())
def __repr__(self):
lst=[]
for x in self.__dict__.keys():
if x not in['env','bld','compiled_tasks','tasks']:
lst.append("%s=%s"%(x,repr(getattr(self,x))))
return"bld(%s) in %s"%(", ".join(lst),self.path.abspath())
def get_name(self):
try:
return self._name
except AttributeError:
if isinstance(self.target,list):
lst=[str(x)for x in self.target]
name=self._name=','.join(lst)
else:
name=self._name=str(self.target)
return name
def set_name(self,name):
self._name=name
name=property(get_name,set_name)
def to_list(self,val):
if isinstance(val,str):return val.split()
else:return val
def post(self):
if getattr(self,'posted',None):
return False
self.posted=True
keys=set(self.meths)
self.features=Utils.to_list(self.features)
for x in self.features+['*']:
st=feats[x]
if not st:
if not x in Task.classes:
Logs.warn('feature %r does not exist - bind at least one method to it'%x)
keys.update(list(st))
prec={}
prec_tbl=self.prec or task_gen.prec
for x in prec_tbl:
if x in keys:
prec[x]=prec_tbl[x]
tmp=[]
for a in keys:
for x in prec.values():
if a in x:break
else:
tmp.append(a)
tmp.sort()
out=[]
while tmp:
e=tmp.pop()
if e in keys:out.append(e)
try:
nlst=prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
if prec:
raise Errors.WafError('Cycle detected in the method execution %r'%prec)
out.reverse()
self.meths=out
Logs.debug('task_gen: posting %s %d'%(self,id(self)))
for x in out:
try:
v=getattr(self,x)
except AttributeError:
raise Errors.WafError('%r is not a valid task generator method'%x)
Logs.debug('task_gen: -> %s (%d)'%(x,id(self)))
v()
Logs.debug('task_gen: posted %s'%self.name)
return True
def get_hook(self,node):
name=node.name
for k in self.mappings:
if name.endswith(k):
return self.mappings[k]
for k in task_gen.mappings:
if name.endswith(k):
return task_gen.mappings[k]
raise Errors.WafError("File %r has no mapping in %r (did you forget to load a waf tool?)"%(node,task_gen.mappings.keys()))
def create_task(self,name,src=None,tgt=None):
task=Task.classes[name](env=self.env.derive(),generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
self.tasks.append(task)
return task
def clone(self,env):
newobj=self.bld()
for x in self.__dict__:
if x in['env','bld']:
continue
elif x in['path','features']:
setattr(newobj,x,getattr(self,x))
else:
setattr(newobj,x,copy.copy(getattr(self,x)))
newobj.posted=False
if isinstance(env,str):
newobj.env=self.bld.all_envs[env].derive()
else:
newobj.env=env.derive()
return newobj
def declare_chain(name='',rule=None,reentrant=None,color='BLUE',ext_in=[],ext_out=[],before=[],after=[],decider=None,scan=None,install_path=None,shell=False):
ext_in=Utils.to_list(ext_in)
ext_out=Utils.to_list(ext_out)
if not name:
name=rule
cls=Task.task_factory(name,rule,color=color,ext_in=ext_in,ext_out=ext_out,before=before,after=after,scan=scan,shell=shell)
def x_file(self,node):
ext=decider and decider(self,node)or cls.ext_out
if ext_in:
_ext_in=ext_in[0]
tsk=self.create_task(name,node)
cnt=0
keys=list(self.mappings.keys())+list(self.__class__.mappings.keys())
for x in ext:
k=node.change_ext(x,ext_in=_ext_in)
tsk.outputs.append(k)
if reentrant!=None:
if cnt<int(reentrant):
self.source.append(k)
else:
for y in keys:
if k.name.endswith(y):
self.source.append(k)
break
cnt+=1
if install_path:
self.bld.install_files(install_path,tsk.outputs)
return tsk
for x in cls.ext_in:
task_gen.mappings[x]=x_file
return x_file
def taskgen_method(func):
setattr(task_gen,func.__name__,func)
return func
def feature(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for name in k:
feats[name].update([func.__name__])
return func
return deco
def before_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not func.__name__ in task_gen.prec[fun_name]:
task_gen.prec[fun_name].append(func.__name__)
return func
return deco
before=before_method
def after_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not fun_name in task_gen.prec[func.__name__]:
task_gen.prec[func.__name__].append(fun_name)
return func
return deco
after=after_method
def extension(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for x in k:
task_gen.mappings[x]=func
return func
return deco
@taskgen_method
def to_nodes(self,lst,path=None):
tmp=[]
path=path or self.path
find=path.find_resource
if isinstance(lst,self.path.__class__):
lst=[lst]
for x in Utils.to_list(lst):
if isinstance(x,str):
node=find(x)
else:
node=x
if not node:
raise Errors.WafError("source not found: %r in %r"%(x,self))
tmp.append(node)
return tmp
@feature('*')
def process_source(self):
self.source=self.to_nodes(getattr(self,'source',[]))
for node in self.source:
self.get_hook(node)(self,node)
@feature('*')
@before_method('process_source')
def process_rule(self):
if not getattr(self,'rule',None):
return
name=str(getattr(self,'name',None)or self.target or getattr(self.rule,'__name__',self.rule))
try:
cache=self.bld.cache_rule_attr
except AttributeError:
cache=self.bld.cache_rule_attr={}
cls=None
if getattr(self,'cache_rule','True'):
try:
cls=cache[(name,self.rule)]
except KeyError:
pass
if not cls:
cls=Task.task_factory(name,self.rule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'),scan=getattr(self,'scan',None))
if getattr(self,'scan',None):
cls.scan=self.scan
elif getattr(self,'deps',None):
def scan(self):
nodes=[]
for x in self.generator.to_list(getattr(self.generator,'deps',None)):
node=self.generator.path.find_resource(x)
if not node:
self.generator.bld.fatal('Could not find %r (was it declared?)'%x)
nodes.append(node)
return[nodes,[]]
cls.scan=scan
if getattr(self,'update_outputs',None):
Task.update_outputs(cls)
if getattr(self,'always',None):
Task.always_run(cls)
for x in['after','before','ext_in','ext_out']:
setattr(cls,x,getattr(self,x,[]))
if getattr(self,'cache_rule','True'):
cache[(name,self.rule)]=cls
tsk=self.create_task(name)
if getattr(self,'target',None):
if isinstance(self.target,str):
self.target=self.target.split()
if not isinstance(self.target,list):
self.target=[self.target]
for x in self.target:
if isinstance(x,str):
tsk.outputs.append(self.path.find_or_declare(x))
else:
x.parent.mkdir()
tsk.outputs.append(x)
if getattr(self,'install_path',None):
self.bld.install_files(self.install_path,tsk.outputs)
if getattr(self,'source',None):
tsk.inputs=self.to_nodes(self.source)
self.source=[]
if getattr(self,'cwd',None):
tsk.cwd=self.cwd
@feature('seq')
def sequence_order(self):
if self.meths and self.meths[-1]!='sequence_order':
self.meths.append('sequence_order')
return
if getattr(self,'seq_start',None):
return
if getattr(self.bld,'prev',None):
self.bld.prev.post()
for x in self.bld.prev.tasks:
for y in self.tasks:
y.set_run_after(x)
self.bld.prev=self
re_m4=re.compile(r'@(\w+)@',re.M)
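# Hedged example of the @VAR@ substitution performed by subst_pc below: an
# input line 'prefix=@PREFIX@' is first rewritten to 'prefix=%(PREFIX)s' by
# re_m4, then filled from generator attributes or the environment (PREFIX,
# then its uppercase form), e.g. yielding 'prefix=/usr'.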
class subst_pc(Task.Task):
def run(self):
if getattr(self.generator,'is_copy',None):
self.outputs[0].write(self.inputs[0].read('rb'),'wb')
if getattr(self.generator,'chmod',None):
os.chmod(self.outputs[0].abspath(),self.generator.chmod)
return
code=self.inputs[0].read(encoding=getattr(self.generator,'encoding','ISO8859-1'))
if getattr(self.generator,'subst_fun',None):
code=self.generator.subst_fun(self,code)
if code:
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
return
code=code.replace('%','%%')
lst=[]
def repl(match):
g=match.group
if g(1):
lst.append(g(1))
return"%%(%s)s"%g(1)
return''
code=re_m4.sub(repl,code)
try:
d=self.generator.dct
except AttributeError:
d={}
for x in lst:
tmp=getattr(self.generator,x,'')or self.env.get_flat(x)or self.env.get_flat(x.upper())
d[x]=str(tmp)
code=code%d
self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
self.generator.bld.raw_deps[self.uid()]=self.dep_vars=lst
try:delattr(self,'cache_sig')
except AttributeError:pass
if getattr(self.generator,'chmod',None):
os.chmod(self.outputs[0].abspath(),self.generator.chmod)
def sig_vars(self):
bld=self.generator.bld
env=self.env
upd=self.m.update
if getattr(self.generator,'subst_fun',None):
upd(Utils.h_fun(self.generator.subst_fun))
vars=self.generator.bld.raw_deps.get(self.uid(),[])
act_sig=bld.hash_env_vars(env,vars)
upd(act_sig)
lst=[getattr(self.generator,x,'')for x in vars]
upd(Utils.h_list(lst))
return self.m.digest()
@extension('.pc.in')
def add_pcfile(self,node):
tsk=self.create_task('subst_pc',node,node.change_ext('.pc','.pc.in'))
self.bld.install_files(getattr(self,'install_path','${LIBDIR}/pkgconfig/'),tsk.outputs)
class subst(subst_pc):
pass
@feature('subst')
@before_method('process_source','process_rule')
def process_subst(self):
src=Utils.to_list(getattr(self,'source',[]))
if isinstance(src,Node.Node):
src=[src]
tgt=Utils.to_list(getattr(self,'target',[]))
if isinstance(tgt,Node.Node):
tgt=[tgt]
if len(src)!=len(tgt):
raise Errors.WafError('invalid number of source/target for %r'%self)
for x,y in zip(src,tgt):
if not x or not y:
raise Errors.WafError('null source or target for %r'%self)
a,b=None,None
if isinstance(x,str)and isinstance(y,str)and x==y:
a=self.path.find_node(x)
b=self.path.get_bld().make_node(y)
if not os.path.isfile(b.abspath()):
b.sig=None
b.parent.mkdir()
else:
if isinstance(x,str):
a=self.path.find_resource(x)
elif isinstance(x,Node.Node):
a=x
if isinstance(y,str):
b=self.path.find_or_declare(y)
elif isinstance(y,Node.Node):
b=y
if not a:
raise Errors.WafError('could not find %r for %r'%(x,self))
has_constraints=False
tsk=self.create_task('subst',a,b)
for k in('after','before','ext_in','ext_out'):
val=getattr(self,k,None)
if val:
has_constraints=True
setattr(tsk,k,val)
if not has_constraints and b.name.endswith('.h'):
tsk.before=[k for k in('c','cxx')if k in Task.classes]
inst_to=getattr(self,'install_path',None)
if inst_to:
self.bld.install_files(inst_to,b,chmod=getattr(self,'chmod',Utils.O644))
self.source=[]
|
ericdill/scikit-xray | refs/heads/master | skbeam/core/fitting/tests/test_lineshapes.py | 7 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li ([email protected]) #
# created on 07/16/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import (assert_array_almost_equal)
from skbeam.core.fitting import (gaussian,
                                 gausssian_step,  # sic: the triple-s spelling is the name the library exports
                                 gaussian_tail,
elastic, compton, lorentzian, lorentzian2,
voigt, pvoigt)
from skbeam.core.fitting import (ComptonModel, ElasticModel)
from skbeam.core.fitting import (gamma_dist, nbinom_dist, poisson_dist)
def test_gauss_peak():
"""
test of gauss function from xrf fit
"""
area = 1
cen = 0
std = 1
x = np.arange(-3, 3, 0.5)
out = gaussian(x, area, cen, std)
y_true = [0.00443185, 0.0175283, 0.05399097, 0.1295176, 0.24197072,
0.35206533, 0.39894228, 0.35206533, 0.24197072, 0.1295176,
0.05399097, 0.0175283]
assert_array_almost_equal(y_true, out)
def test_gauss_step():
"""
test of gaussian step function from xrf fit
"""
y_true = [1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 9.99999999e-01, 9.99999713e-01,
9.99968329e-01, 9.98650102e-01, 9.77249868e-01,
8.41344746e-01, 5.00000000e-01, 1.58655254e-01,
2.27501319e-02, 1.34989803e-03, 3.16712418e-05]
area = 1
cen = 0
std = 1
x = np.arange(-10, 5, 1)
peak_e = 1.0
out = gausssian_step(x, area, cen, std, peak_e)
assert_array_almost_equal(y_true, out)
def test_gauss_tail():
"""
test of gaussian tail function from xrf fit
"""
y_true = [7.48518299e-05, 2.03468369e-04, 5.53084370e-04, 1.50343919e-03,
4.08677027e-03, 1.11086447e-02, 3.01566200e-02, 8.02175541e-02,
1.87729388e-01, 3.03265330e-01, 2.61578292e-01, 3.75086265e-02,
2.22560560e-03, 5.22170501e-05, 4.72608544e-07]
area = 1
cen = 0
std = 1
x = np.arange(-10, 5, 1)
gamma = 1.0
out = gaussian_tail(x, area, cen, std, gamma)
assert_array_almost_equal(y_true, out)
def test_elastic_peak():
"""
test of elastic peak from xrf fit
"""
y_true = [0.00085311, 0.00164853, 0.00307974, 0.00556237, 0.00971259,
0.01639604, 0.02675911, 0.04222145, 0.06440556, 0.09498223,
0.13542228, 0.18666663, 0.24875512, 0.32048386, 0.39918028,
0.48068522, 0.55960456, 0.62984039, 0.68534389, 0.72096698,
0.73324816, 0.72096698, 0.68534389, 0.62984039, 0.55960456,
0.48068522, 0.39918028, 0.32048386, 0.24875512, 0.18666663,
0.13542228, 0.09498223, 0.06440556, 0.04222145, 0.02675911,
0.01639604, 0.00971259, 0.00556237, 0.00307974, 0.00164853]
area = 1
energy = 10
offset = 0.01
fanoprime = 0.01
e_offset = 0
e_linear = 1
e_quadratic = 0
ev = np.arange(8, 12, 0.1)
out = elastic(ev, area, energy,
offset, fanoprime,
e_offset, e_linear, e_quadratic)
assert_array_almost_equal(y_true, out)
def test_compton_peak():
"""
test of compton peak from xrf fit
"""
y_true = [0.01332237, 0.01536984, 0.01870113, 0.02401014, 0.03223281,
0.04455143, 0.0623487, 0.08709168, 0.12013435, 0.16244524,
0.2142911, 0.27493377, 0.34241693, 0.41352197, 0.48395163,
0.5487556, 0.6029529, 0.64224726, 0.66369326, 0.65792554,
0.63050209, 0.58478146, 0.52510892, 0.45674079, 0.38508357,
0.31500557, 0.25033778, 0.19362201, 0.14610264, 0.10790876,
0.07834781, 0.05623019, 0.04016135, 0.02876383, 0.02081757,
0.01532608, 0.01152704, 0.00886833, 0.00696818, 0.00557234]
energy = 10
offset = 0.01
fano = 0.01
angle = 90
fwhm_corr = 1
amp = 1
f_step = 0
f_tail = 0.1
gamma = 10
hi_f_tail = 0.1
hi_gamma = 1
e_offset = 0
e_linear = 1
e_quadratic = 0
ev = np.arange(8, 12, 0.1)
out = compton(ev, amp, energy, offset, fano,
e_offset, e_linear, e_quadratic, angle,
fwhm_corr, f_step, f_tail,
gamma, hi_f_tail, hi_gamma)
assert_array_almost_equal(y_true, out)
def test_lorentzian_peak():
y_true = [0.03151583, 0.03881828, 0.04897075, 0.06366198, 0.0860297,
0.12242688, 0.18724111, 0.31830989, 0.63661977, 1.59154943,
3.18309886, 1.59154943, 0.63661977, 0.31830989, 0.18724111,
0.12242688, 0.0860297, 0.06366198, 0.04897075, 0.03881828]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
out = lorentzian(x, a, cen, std)
assert_array_almost_equal(y_true, out)
def test_lorentzian_squared_peak():
y_true = [3.12037924e-04, 4.73393644e-04, 7.53396180e-04,
1.27323954e-03, 2.32512700e-03, 4.70872613e-03,
1.10141829e-02, 3.18309886e-02, 1.27323954e-01,
7.95774715e-01, 3.18309886e+00, 7.95774715e-01,
1.27323954e-01, 3.18309886e-02, 1.10141829e-02,
4.70872613e-03, 2.32512700e-03, 1.27323954e-03,
7.53396180e-04, 4.73393644e-04]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
out = lorentzian2(x, a, cen, std)
assert_array_almost_equal(y_true, out)
def test_voigt_peak():
y_true = [0.03248735, 0.04030525, 0.05136683, 0.06778597, 0.09377683,
0.13884921, 0.22813635, 0.43385822, 0.90715199, 1.65795663,
2.08709281, 1.65795663, 0.90715199, 0.43385822, 0.22813635,
0.13884921, 0.09377683, 0.06778597, 0.05136683, 0.04030525]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
out1 = voigt(x, a, cen, std, gamma=0.1)
out2 = voigt(x, a, cen, std)
assert_array_almost_equal(y_true, out1)
assert_array_almost_equal(y_true, out2)
def test_pvoigt_peak():
y_true = [0.01575792, 0.01940914, 0.02448538, 0.03183099, 0.04301488,
0.06122087, 0.09428971, 0.18131419, 0.58826472, 2.00562834,
3.58626083, 2.00562834, 0.58826472, 0.18131419, 0.09428971,
0.06122087, 0.04301488, 0.03183099, 0.02448538, 0.01940914]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
fraction = 0.5
out = pvoigt(x, a, cen, std, fraction)
assert_array_almost_equal(y_true, out)
def test_elastic_model():
area = 11
energy = 10
offset = 0.02
fanoprime = 0.03
e_offset = 0
e_linear = 0.01
e_quadratic = 0
true_param = [fanoprime, area, energy]
x = np.arange(800, 1200, 1)
out = elastic(x, area, energy, offset, fanoprime,
e_offset, e_linear, e_quadratic)
elastic_model = ElasticModel()
# fwhm_offset is not a sensitive parameter, used as a fixed value
elastic_model.set_param_hint(name='e_offset', value=0, vary=False)
elastic_model.set_param_hint(name='e_linear', value=0.01, vary=False)
elastic_model.set_param_hint(name='e_quadratic', value=0, vary=False)
elastic_model.set_param_hint(name='coherent_sct_energy', value=10,
vary=False)
elastic_model.set_param_hint(name='fwhm_offset', value=0.02, vary=False)
elastic_model.set_param_hint(name='fwhm_fanoprime', value=0.03, vary=False)
result = elastic_model.fit(out, x=x, coherent_sct_amplitude=10)
fitted_val = [result.values['fwhm_fanoprime'],
result.values['coherent_sct_amplitude'],
result.values['coherent_sct_energy']]
assert_array_almost_equal(true_param, fitted_val, decimal=2)
def test_compton_model():
energy = 10
offset = 0.001
fano = 0.01
angle = 90
fwhm_corr = 1
amp = 20
f_step = 0.05
f_tail = 0.1
gamma = 2
hi_f_tail = 0.01
hi_gamma = 1
e_offset = 0
e_linear = 0.01
e_quadratic = 0
x = np.arange(800, 1200, 1.0)
true_param = [energy, amp]
out = compton(x, amp, energy, offset, fano,
e_offset, e_linear, e_quadratic,
angle, fwhm_corr, f_step, f_tail,
gamma, hi_f_tail, hi_gamma)
cm = ComptonModel()
# parameters not sensitive
cm.set_param_hint(name='compton_hi_gamma', value=hi_gamma, vary=False)
cm.set_param_hint(name='fwhm_offset', value=offset, vary=False)
cm.set_param_hint(name='compton_angle', value=angle, vary=False)
cm.set_param_hint(name='e_offset', value=e_offset, vary=False)
cm.set_param_hint(name='e_linear', value=e_linear, vary=False)
cm.set_param_hint(name='e_quadratic', value=e_quadratic, vary=False)
cm.set_param_hint(name='fwhm_fanoprime', value=fano, vary=False)
cm.set_param_hint(name='compton_hi_f_tail', value=hi_f_tail, vary=False)
cm.set_param_hint(name='compton_f_step', value=f_step, vary=False)
cm.set_param_hint(name='compton_f_tail', value=f_tail, vary=False)
cm.set_param_hint(name='compton_gamma', value=gamma, vary=False)
cm.set_param_hint(name='compton_amplitude', value=20, vary=False)
cm.set_param_hint(name='compton_fwhm_corr', value=fwhm_corr, vary=False)
p = cm.make_params()
result = cm.fit(out, x=x, params=p, compton_amplitude=20,
coherent_sct_energy=10)
fit_val = [result.values['coherent_sct_energy'],
result.values['compton_amplitude']]
assert_array_almost_equal(true_param, fit_val, decimal=2)
def test_dist():
M = 1.9 # number of coherent modes
K = 3.15 # number of photons
bin_edges = np.array([0., 0.4, 0.8, 1.2, 1.6, 2.0])
pk_n = nbinom_dist(bin_edges, K, M)
pk_p = poisson_dist(bin_edges, K)
pk_g = gamma_dist(bin_edges, K, M)
assert_array_almost_equal(pk_n, np.array([0.15609113, 0.17669628,
0.18451672, 0.1837303,
0.17729389, 0.16731627]))
assert_array_almost_equal(pk_g, np.array([0., 0.13703903, 0.20090424,
0.22734693, 0.23139384,
0.22222281]))
assert_array_almost_equal(pk_p,
np.array([0.04285213, 0.07642648,
0.11521053, 0.15411372,
0.18795214, 0.21260011]))
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
WendellDuncan/or-tools | refs/heads/master | examples/python/debruijn_binary.py | 5 | # Copyright 2010 Hakan Kjellerstrand [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
de Bruijn sequences in Google CP Solver.
Implementation of de Bruijn sequences, both 'classical' and 'arbitrary'
(ported from a MiniZinc model).
The 'arbitrary' version is when the length of the sequence (m here) is <
base**n.
Compare with the web-based programs:
http://www.hakank.org/comb/debruijn.cgi
http://www.hakank.org/comb/debruijn_arb.cgi
Compare with the following models:
* Tailor/Essence': http://hakank.org/tailor/debruijn.eprime
* MiniZinc: http://hakank.org/minizinc/debruijn_binary.mzn
* SICStus: http://hakank.org/sicstus/debruijn.pl
* Zinc: http://hakank.org/minizinc/debruijn_binary.zinc
* Choco: http://hakank.org/choco/DeBruijn.java
* Comet: http://hakank.org/comet/debruijn.co
* ECLiPSe: http://hakank.org/eclipse/debruijn.ecl
* Gecode: http://hakank.org/gecode/debruijn.cpp
* Gecode/R: http://hakank.org/gecode_r/debruijn_binary.rb
* JaCoP: http://hakank.org/JaCoP/DeBruijn.java
This model was created by Hakan Kjellerstrand ([email protected])
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from __future__ import print_function
import sys
from ortools.constraint_solver import pywrapcp
# constrains a number (s) to equal an array of digits (t) interpreted in the given base.
def toNum(solver, t, s, base):
tlen = len(t)
solver.Add(
s == solver.Sum([(base ** (tlen - i - 1)) * t[i] for i in range(tlen)]))
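# Hedged worked example: with base=2 and t of length 3, toNum posts the single
# channeling constraint s == 4*t[0] + 2*t[1] + t[2], so t = [1, 0, 1]
# forces s == 5.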
def main(base=2, n=3, m=8):
# Create the solver.
solver = pywrapcp.Solver("de Bruijn sequences")
#
# data
#
# base = 2 # the base to use, i.e. the alphabet 0..n-1
# n = 3 # number of bits to use (n = 4 -> 0..base^n-1 = 0..2^4 -1, i.e. 0..15)
# m = base**n # the length of the sequence. For "arbitrary" de Bruijn
# sequences
# base = 4
# n = 4
# m = base**n
# harder problem
#base = 13
#n = 4
#m = 52
# for n = 4 with different value of base
# base = 2 0.030 seconds 16 failures
# base = 3 0.041 108
# base = 4 0.070 384
# base = 5 0.231 1000
# base = 6 0.736 2160
# base = 7 2.2 seconds 4116
# base = 8 6 seconds 7168
# base = 9 16 seconds 11664
# base = 10 42 seconds 18000
# base = 6
# n = 4
# m = base**n
# if True, ensure that the digits 0..base-1 all occur equally often
# (only enforced when m mod base == 0)
check_same_gcc = True
print("base: %i n: %i m: %i" % (base, n, m))
if check_same_gcc:
print("Checks gcc")
# declare variables
x = [solver.IntVar(0, (base ** n) - 1, "x%i" % i) for i in range(m)]
binary = {}
for i in range(m):
for j in range(n):
binary[(i, j)] = solver.IntVar(0, base - 1, "x_%i_%i" % (i, j))
bin_code = [solver.IntVar(0, base - 1, "bin_code%i" % i) for i in range(m)]
#
# constraints
#
#solver.Add(solver.AllDifferent([x[i] for i in range(m)]))
solver.Add(solver.AllDifferent(x))
# converts x <-> binary
for i in range(m):
t = [solver.IntVar(0, base - 1, "t_%i" % j) for j in range(n)]
toNum(solver, t, x[i], base)
for j in range(n):
solver.Add(binary[(i, j)] == t[j])
# the de Bruijn condition
# the first elements in binary[i] is the same as the last
# elements in binary[i-i]
for i in range(1, m - 1):
for j in range(1, n - 1):
solver.Add(binary[(i - 1, j)] == binary[(i, j - 1)])
# ... and around the corner
for j in range(1, n):
solver.Add(binary[(m - 1, j)] == binary[(0, j - 1)])
# converts binary -> bin_code
for i in range(m):
solver.Add(bin_code[i] == binary[(i, 0)])
# extra: ensure that all the numbers in the de Bruijn sequence
# (bin_code) has the same occurrences (if check_same_gcc is True
# and mathematically possible)
gcc = [solver.IntVar(0, m, "gcc%i" % i) for i in range(base)]
solver.Add(solver.Distribute(bin_code, list(range(base)), gcc))
if check_same_gcc and m % base == 0:
for i in range(1, base):
solver.Add(gcc[i] == gcc[i - 1])
#
# solution and search
#
solution = solver.Assignment()
solution.Add([x[i] for i in range(m)])
solution.Add([bin_code[i] for i in range(m)])
# solution.Add([binary[(i,j)] for i in range(m) for j in range(n)])
solution.Add([gcc[i] for i in range(base)])
db = solver.Phase([x[i] for i in range(m)] + [bin_code[i] for i in range(m)],
solver.CHOOSE_MIN_SIZE_LOWEST_MAX,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print("\nSolution %i" % num_solutions)
print("x:", [int(x[i].Value()) for i in range(m)])
print("gcc:", [int(gcc[i].Value()) for i in range(base)])
print("de Bruijn sequence:", [int(bin_code[i].Value()) for i in range(m)])
# for i in range(m):
# for j in range(n):
# print binary[(i,j)].Value(),
# print
# print
solver.EndSearch()
if num_solutions == 0:
print("No solution found")
print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
base = 2
n = 3
m = base ** n
if __name__ == "__main__":
if len(sys.argv) > 1:
base = int(sys.argv[1])
if len(sys.argv) > 2:
n = int(sys.argv[2])
if len(sys.argv) > 3:
m = int(sys.argv[3])
main(base, n, m)
|
cjmcgraw/xhtml2pdf | refs/heads/master | demo/tgpisa/tgpisa/json.py | 180 | # A JSON-based API(view) for your app.
# Most rules would look like:
# @jsonify.when("isinstance(obj, YourClass)")
# def jsonify_yourclass(obj):
# return [obj.val1, obj.val2]
# @jsonify can convert your objects to the following types:
# lists, dicts, numbers and strings
from turbojson.jsonify import jsonify
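# A minimal hedged sketch of such a rule (BookmarkList is a hypothetical
# class, not part of tgpisa):
#
# @jsonify.when("isinstance(obj, BookmarkList)")
# def jsonify_bookmarklist(obj):
#     return {"title": obj.title, "urls": list(obj.urls)}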
|
h-j-13/MyNote | refs/heads/master | Programming language/Python/File_IO/main_t.py | 1 | # encoding:utf-8
"""
Similarity computation over massive sets of high-dimensional domain-name vectors with faiss.
Files are fetched asynchronously via multiprocessing queue communication.
author : h-j-13
time : 2018-6-28
"""
import time
from multiprocessing import Process, Value, Queue
import numpy
import faiss
import File_IO_new as File_IO
# faiss settings
D = 300
K = 50
INDEX_STR = "OPQ20_80,IVF100,PQ20"
TRAIN_FILE_SIZE = 2
# file I/O settings
INPUT_FILE_PATH = './data/'
OUTPUT_FILE_PATH = './data/result_t/'
OUTPUT_FILE_BASIC_NAME = 'faiss_kNN_'
FILE_NAME_LIST = File_IO.getAllFileName(INPUT_FILE_PATH)
READ_FILE_NUM = 3 # number of files to read ahead asynchronously
# inter-process communication variables
# processes do not share globals by default; state is shared via multiprocessing.Value objects
FILE_IDS_VECTOR_QUEUE = Queue()
FILE_IDS_VECTOR_QUEUE_FOR_SEARCH = Queue()
FILE_WRITE_QUEUE = Queue()
START_READ = Value("i", 0)
START_READ_FOR_SEARCH = Value("i", 0)
END_READ = Value("i", 0)
END_READ_FOR_SEARCH = Value("i", 0)
END_WRITE_FILE = Value("i", 0)
WRITE_FILE_PROCESS = None
def read_file_process(start_file_num=TRAIN_FILE_SIZE,
file_path=INPUT_FILE_PATH,
file_name_list=FILE_NAME_LIST, ):
"""
File-reading polling process; data is exchanged with the main process through global flags and queues.
:param start_file_num: index of the first file to read (the first few files were already added to the index during training)
:param file_path: directory containing the input files
:param file_name_list: list of input file names
"""
global START_READ, END_READ, FILE_IDS_VECTOR_QUEUE, READ_FILE_NUM
while not START_READ.value: # wait for the start signal from the main process
time.sleep(1)
for i in xrange(start_file_num, len(file_name_list)):
while FILE_IDS_VECTOR_QUEUE.qsize() > READ_FILE_NUM: # bound the queue size
time.sleep(1)
file_name = file_name_list[i]
ids, data = File_IO.readfile2ids_vec(file_path + file_name)
FILE_IDS_VECTOR_QUEUE.put((ids, data))
print File_IO.getLocalTimeStr(),
print file_name + " 向量数据添加至队列"
END_READ.value = 1 # 告知主进程处理结束
return
def read_file_for_search_process(file_path=INPUT_FILE_PATH,
file_name_list=FILE_NAME_LIST, ):
"""读取文件轮询进程,数据通过全局变量/队列进行交互(执行搜索时用)"""
global START_READ_FOR_SEARCH, END_READ_FOR_SEARCH, FILE_IDS_VECTOR_QUEUE_FOR_SEARCH, READ_FILE_NUM
while not START_READ_FOR_SEARCH.value: # wait for the start signal from the main process
time.sleep(1)
for file_name in file_name_list:
# bound the queue size
while FILE_IDS_VECTOR_QUEUE_FOR_SEARCH.qsize() > READ_FILE_NUM:
time.sleep(1)
ids, data = File_IO.readfile2ids_vec(file_path + file_name)
FILE_IDS_VECTOR_QUEUE_FOR_SEARCH.put((ids, data))
print File_IO.getLocalTimeStr(),
print file_name + " 向量数据添加至待搜索队列"
# 向主进程回送读取完毕的信号
END_READ_FOR_SEARCH.value = 1
return
def write_file_process():
"""写入结果文件进程"""
global FILE_WRITE_QUEUE, END_WRITE_FILE
while not END_WRITE_FILE.value:
if not FILE_WRITE_QUEUE.empty():
(file_path, ids, I, D) = FILE_WRITE_QUEUE.get()
File_IO.writeSearchResult(file_path, ids, I, D)
print File_IO.getLocalTimeStr() + " wp完成了一轮向量搜索.结果写入 " + str(file_path)
else:
time.sleep(1)
print File_IO.getLocalTimeStr() + " 结果队列中尚未处理数量:" + str(FILE_WRITE_QUEUE.qsize())
# finally, drain whatever is left in the queue
while not FILE_WRITE_QUEUE.empty():
(file_path, ids, I, D) = FILE_WRITE_QUEUE.get()
File_IO.writeSearchResult(file_path, ids, I, D)
print File_IO.getLocalTimeStr() + " wp完成了一次向量搜索.结果写入 " + str(file_path)
return
def IndexInit(index_str="OPQ8_64,IVF100,PQ8", d=D):
"""初始化,生成并训练索引"""
global INPUT_FILE_PATH, FILE_NAME_LIST, TRAIN_FILE_SIZE
global START_RAED, WRITE_FILE_PROCESS
# start the I/O helper processes
rfp = Process(target=read_file_process, name='ReadFiledata')
rfp.start()
rfpfs = Process(target=read_file_for_search_process, name='ReadFiledataForSearch')
rfpfs.start()
WRITE_FILE_PROCESS = Process(target=write_file_process, name='WriteFile')
WRITE_FILE_PROCESS.start()
# build the index
index = faiss.index_factory(d, index_str)
print File_IO.getLocalTimeStr(),
print 'START - Index=' + index_str + ' k=' + str(K)
# load the training data
for fn in xrange(TRAIN_FILE_SIZE):
if fn == 0: # first read initializes the matrices
ids, train_data = File_IO.readfile2ids_vec(INPUT_FILE_PATH + FILE_NAME_LIST[fn])
else:
temp_ids, temp_data = File_IO.readfile2ids_vec(INPUT_FILE_PATH + FILE_NAME_LIST[fn])
ids = numpy.hstack((ids, temp_ids))
del temp_ids # drop the reference promptly so it can be garbage-collected
train_data = numpy.vstack((train_data, temp_data))
del temp_data
print File_IO.getLocalTimeStr(),
print 'training data loaded.'
# train the index
index.train(train_data)
print File_IO.getLocalTimeStr(),
print 'training finished.'
# also tell the reader process to start streaming data
START_READ.value = 1
# add the training data to the index
index.add_with_ids(train_data, ids)
del train_data
del ids
print File_IO.getLocalTimeStr(),
print "训练数据添加至索引, 训练用数据量:" + str(index.ntotal)
return index
def add_vectors(index):
"""向索引中添加向量"""
global END_READ, START_READ_FOR_SEARCH, FILE_IDS_VECTOR_QUEUE
# adding ends once the reader process has finished and the queue has been drained
while not END_READ.value:
if not FILE_IDS_VECTOR_QUEUE.empty():
(ids, vec) = FILE_IDS_VECTOR_QUEUE.get()
if len(ids):
index.add_with_ids(vec, ids)
print File_IO.getLocalTimeStr() + " 向量数据添加..."
else:
time.sleep(1)
# also tell the search-side reader to start preparing data
START_READ_FOR_SEARCH.value = 1
# drain any data still left in the queue
print File_IO.getLocalTimeStr() + " items still pending in the add queue: " + str(FILE_IDS_VECTOR_QUEUE.qsize())
while not FILE_IDS_VECTOR_QUEUE.empty():
(ids, vec) = FILE_IDS_VECTOR_QUEUE.get()
if len(vec):
index.add_with_ids(vec, ids)
print File_IO.getLocalTimeStr() + " 向量数据添加."
def search_kNN(index):
"""搜索临近向量"""
global OUTPUT_FILE_PATH, OUTPUT_FILE_BASIC_NAME, END_WRITE_FILE, FILE_WRITE_QUEUE, WRITE_FILE_PROCESS
global FILE_IDS_VECTOR_QUEUE_FOR_SEARCH, END_READ_FOR_SEARCH
global K
print File_IO.getLocalTimeStr() + " 开始执行搜索,目前索引中向量数量:" + str(index.ntotal)
fcnt = 0
while (not END_READ_FOR_SEARCH.value) or (not FILE_IDS_VECTOR_QUEUE_FOR_SEARCH.empty()):
if not FILE_IDS_VECTOR_QUEUE_FOR_SEARCH.empty():
(ids, vec) = FILE_IDS_VECTOR_QUEUE_FOR_SEARCH.get()
if len(ids): # run the k-NN search
D, I = index.search(vec, K)
# hand the results to the writer process
file_path = OUTPUT_FILE_PATH + OUTPUT_FILE_BASIC_NAME + str(fcnt)
fcnt += 1
# avoid piling up too many result batches in the queue
while FILE_WRITE_QUEUE.qsize() > READ_FILE_NUM:
time.sleep(1)
FILE_WRITE_QUEUE.put((file_path, ids, I, D))
else:
time.sleep(1)
END_WRITE_FILE.value = 1
WRITE_FILE_PROCESS.join()
def main():
"""主流程"""
global INDEX_STR
index = IndexInit(index_str=INDEX_STR)
add_vectors(index)
search_kNN(index)
print File_IO.getLocalTimeStr() + " Finish"
if __name__ == '__main__':
main()
# roughly a 38% speedup
|
ArtezGDA/Algorithmic-Nature | refs/heads/master | Luc/sketches & libs/supershape/supershape-example1.py | 1 | from math import sin, cos
try:
supershape = ximport("supershape")
except:
supershape = ximport("__init__")
reload(supershape)
speed(100)
size(400, 400)
def setup():
global x, y, w, h, m, n1, n2, n3, i
x, y = 200, 200
w, h = 100, 100
m = 6.0
n1 = 1.0
n2 = 1.0
n3 = 1.0
i = 0.0
def draw():
global x, y, w, h, m, n1, n2, n3, i
m = 12
n1 = 5.0 + sin(i)
n2 = 10 + cos(i) * 10
n3 = sin(i) * 10
i += 0.05
rotate(i*10)
p = supershape.path(x, y, w, h, m, n1, n2, n3)
drawpath(p)
|
neumerance/cloudloon2 | refs/heads/master | .venv/lib/python2.7/site-packages/yaml/cyaml.py | 538 |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from constructor import *
from serializer import *
from representer import *
from resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
def __init__(self, stream):
CParser.__init__(self, stream)
BaseConstructor.__init__(self)
BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
def __init__(self, stream):
CParser.__init__(self, stream)
SafeConstructor.__init__(self)
Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
def __init__(self, stream):
CParser.__init__(self, stream)
Constructor.__init__(self)
Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
CEmitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width, encoding=encoding,
allow_unicode=allow_unicode, line_break=line_break,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
CEmitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width, encoding=encoding,
allow_unicode=allow_unicode, line_break=line_break,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
CEmitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width, encoding=encoding,
allow_unicode=allow_unicode, line_break=line_break,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
|
marcoscastro/pygraph | refs/heads/master | tests/test4.py | 1 | #-*- coding:utf-8 -*-
from pygraph import *
graph = Graph()
graph.add_edge(Edge(Node(0), Node(1)))
graph.add_edge(Edge(Node(0), Node(2)))
graph.add_edge(Edge(Node(1), Node(2)))
graph.add_edge(Edge(Node(2), Node(0)))
graph.add_edge(Edge(Node(2), Node(3)))
graph.add_edge(Edge(Node(3), Node(3)))
def run_test_directed(expected, returned):
if expected != returned:
print("Não passou no teste: run_test_directed")
else:
print("Passou no teste: run_test_directed")
def run_test_connected(expected, returned):
if expected != returned:
print("Não passou no teste: run_test_connected")
else:
print("Passou no teste: run_test_connected")
def run_test_completed(expected, returned):
if expected != returned:
print("Não passou no teste: run_test_completed")
else:
print("Passou no teste: run_test_completed")
def run_test_cyclic(expected, returned):
if expected != returned:
print("Não passou no teste: run_test_cyclic")
else:
print("Passou no teste: run_test_cyclic")
run_test_directed(True, graph.is_directed())
run_test_connected(True, graph.is_connected())
run_test_completed(False, graph.is_complete())
run_test_cyclic(True, graph.is_cyclic()) |
xu6148152/Binea_Python_Project | refs/heads/master | FluentPython/object_reference_mutability_recycling/weakref_test.py | 1 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import weakref
a_set = {0, 1}
wref = weakref.ref(a_set)
print(wref)
print(wref())
a_set = {2, 3, 4}
print(wref())
print(wref() is None)
print(wref() is None)
class Cheese:
def __init__(self, kind):
self.kind = kind
def __repr__(self):
return 'Cheese(%r)' % self.kind
stock = weakref.WeakValueDictionary() # hold Cheese values weakly; str keys cannot be weakly referenced, so WeakKeyDictionary would fail here
catalog = [Cheese('Red Leicester'), Cheese('Tilsit'), Cheese('Brie'), Cheese('Parmesan')]
for cheese in catalog:
stock[cheese.kind] = cheese
print(sorted(stock.keys()))
del catalog
print(stock.keys())
del cheese
print(sorted(stock.keys()))
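# After 'del catalog' only 'Parmesan' survived, because the loop variable
# 'cheese' still held a strong reference to the last Cheese; 'del cheese'
# then empties the WeakValueDictionary, so this final print shows [].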
|
mcoavoux/wiki_parse | refs/heads/master | word_count.py | 1 | #encoding:utf8
import os
import os.path
import sys
import argparse
from collections import defaultdict
ID, FORM, LEMMA, CPOS, FPOS, MORPH, HEAD, REL, PHEAD, PREL=range(10)
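# One CoNLL token line is tab-separated with the ten fields above, e.g.
# (hypothetical values):
#   1  Le  le  D  DET  g=m|n=s  2  det  _  _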
def get_conll_files(path):
sys.stderr.write("Entering {}\n".format(path))
dirs = []
files = []
for f in os.listdir(path):
f = os.path.join(path,f)
if os.path.isfile(f):
if f.endswith(".conll"):
files.append(f)
if os.path.isdir(f):
dirs.append(f)
for d in dirs:
files.extend( get_conll_files(d) )
return files
def read_conll(filename):
with open(filename) as f:
corpus = []
sentences = f.read().split("\n\n")
for sentence in sentences:
sentence = sentence.strip()
if sentence:
tokens = [ line.strip().split("\t") for line in sentence.split("\n") ]
assert(all([len(line) == 10 for line in tokens]))
corpus.append(tokens)
return corpus
def write_conll(corpus, filename):
of = open(filename, "w")
for sent in corpus:
for token in sent:
of.write("{}\n".format("\t".join(token)))
of.write("\n")
of.close()
def dump_distribution_dict(filename, dic):
ofstream = open(filename, "w")
for item in sorted(dic, key = lambda x : dic[x], reverse = True):
ofstream.write("{}\t{}\n".format(item, dic[item]))
ofstream.close()
def main(root, output):
os.system("mkdir -p {}".format(output))
file_list = get_conll_files(root)
voc = defaultdict(int)
tags = defaultdict(int)
sent_length = defaultdict(int)
doc_length_s = defaultdict(int)
doc_length_t = defaultdict(int)
n_tokens = 0
n_sentences = 0
n_documents = 0
for f in file_list:
corpus = read_conll(f)
n_documents += 1
doc_length_s[len(corpus)] += 1
doc_length_t[sum([len(s) for s in corpus])] += 1
n_sentences += len(corpus)
for sentence in corpus:
n_tokens += len(sentence)
sent_length[len(sentence)] += 1
for token in sentence:
voc[token[FORM]] += 1
tags[token[CPOS]] += 1
of = open(output+"/stats.txt", "w")
of.write("Number of documents : {}\n".format(n_documents))
of.write("Number of sentences : {}\n".format(n_sentences))
of.write("Number of tokens : {} (ignoring punctuation: {})\n".format(n_tokens, n_tokens - tags["PONCT"]))
of.write("Number of word types : {}\n".format(len(voc)))
of.close()
dump_distribution_dict(output+"/vocabulary", voc)
dump_distribution_dict(output+"/tags", tags)
dump_distribution_dict(output+"/doc_length_w", doc_length_t)
dump_distribution_dict(output+"/doc_length_s", doc_length_s)
dump_distribution_dict(output+"/sent_length", sent_length)
if __name__ == "__main__":
usage = """
Computes some statistics about parsed corpus (number of tokens, word types, etc.):
- Number of documents
- size of documents (num tokens, num sentences)
- Number of sentences
- size of sentences (num tokens)
- Number of tokens
- Number of word types (all, excluding NPP and ET)
"""
parser = argparse.ArgumentParser(description = usage, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("root", type = str, help="Directory (every subfolder will be searched for conll files)")
parser.add_argument("output", type = str, help="Output dir")
args = parser.parse_args()
main(args.root, args.output)
|
murphy-wang/aws-ec2 | refs/heads/master | lib/boto-2.34.0/boto/glacier/vault.py | 153 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
# Copyright (c) 2012 Robie Basak <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import codecs
from boto.glacier.exceptions import UploadArchiveError
from boto.glacier.job import Job
from boto.glacier.writer import compute_hashes_from_fileobj, \
resume_file_upload, Writer
from boto.glacier.concurrent import ConcurrentUploader
from boto.glacier.utils import minimum_part_size, DEFAULT_PART_SIZE
import os.path
_MEGABYTE = 1024 * 1024
_GIGABYTE = 1024 * _MEGABYTE
MAXIMUM_ARCHIVE_SIZE = 10000 * 4 * _GIGABYTE
MAXIMUM_NUMBER_OF_PARTS = 10000
class Vault(object):
DefaultPartSize = DEFAULT_PART_SIZE
SingleOperationThreshold = 100 * _MEGABYTE
ResponseDataElements = (('VaultName', 'name', None),
('VaultARN', 'arn', None),
('CreationDate', 'creation_date', None),
('LastInventoryDate', 'last_inventory_date', None),
('SizeInBytes', 'size', 0),
('NumberOfArchives', 'number_of_archives', 0))
def __init__(self, layer1, response_data=None):
self.layer1 = layer1
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
value = response_data[response_name]
setattr(self, attr_name, value)
else:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, default)
def __repr__(self):
return 'Vault("%s")' % self.arn
def delete(self):
"""
Delete's this vault. WARNING!
"""
self.layer1.delete_vault(self.name)
def upload_archive(self, filename, description=None):
"""
Adds an archive to a vault. For archives greater than 100MB the
multipart upload will be used.
:type file: str
:param file: A filename to upload
:type description: str
:param description: An optional description for the archive.
:rtype: str
:return: The archive id of the newly created archive
"""
if os.path.getsize(filename) > self.SingleOperationThreshold:
return self.create_archive_from_file(filename, description=description)
return self._upload_archive_single_operation(filename, description)
def _upload_archive_single_operation(self, filename, description):
"""
Adds an archive to a vault in a single operation. It's recommended for
archives less than 100MB
:type file: str
:param file: A filename to upload
:type description: str
:param description: A description for the archive.
:rtype: str
:return: The archive id of the newly created archive
"""
with open(filename, 'rb') as fileobj:
linear_hash, tree_hash = compute_hashes_from_fileobj(fileobj)
fileobj.seek(0)
response = self.layer1.upload_archive(self.name, fileobj,
linear_hash, tree_hash,
description)
return response['ArchiveId']
def create_archive_writer(self, part_size=DefaultPartSize,
description=None):
"""
Create a new archive and begin a multi-part upload to it.
Returns a file-like object to which the data for the archive
can be written. Once all the data is written the file-like
object should be closed, you can then call the get_archive_id
method on it to get the ID of the created archive.
:type part_size: int
:param part_size: The part size for the multipart upload.
:type description: str
:param description: An optional description for the archive.
:rtype: :class:`boto.glacier.writer.Writer`
:return: A Writer object that to which the archive data
should be written.
"""
response = self.layer1.initiate_multipart_upload(self.name,
part_size,
description)
return Writer(self, response['UploadId'], part_size=part_size)
def create_archive_from_file(self, filename=None, file_obj=None,
description=None, upload_id_callback=None):
"""
Create a new archive and upload the data from the given file
or file-like object.
:type filename: str
:param filename: A filename to upload
:type file_obj: file
:param file_obj: A file-like object to upload
:type description: str
:param description: An optional description for the archive.
:type upload_id_callback: function
:param upload_id_callback: if set, call with the upload_id as the
only parameter when it becomes known, to enable future calls
to resume_archive_from_file in case resume is needed.
:rtype: str
:return: The archive id of the newly created archive
"""
part_size = self.DefaultPartSize
if not file_obj:
file_size = os.path.getsize(filename)
try:
part_size = minimum_part_size(file_size, part_size)
except ValueError:
raise UploadArchiveError("File size of %s bytes exceeds "
"40,000 GB archive limit of Glacier.")
file_obj = open(filename, "rb")
writer = self.create_archive_writer(
description=description,
part_size=part_size)
if upload_id_callback:
upload_id_callback(writer.upload_id)
while True:
data = file_obj.read(part_size)
if not data:
break
writer.write(data)
writer.close()
return writer.get_archive_id()
@staticmethod
def _range_string_to_part_index(range_string, part_size):
start, inside_end = [int(value) for value in range_string.split('-')]
end = inside_end + 1
length = end - start
if length == part_size + 1:
# Off-by-one bug in Amazon's Glacier implementation,
# see: https://forums.aws.amazon.com/thread.jspa?threadID=106866
# Workaround: since part_size is too big by one byte, adjust it
end -= 1
inside_end -= 1
length -= 1
assert not (start % part_size), (
"upload part start byte is not on a part boundary")
assert (length <= part_size), "upload part is bigger than part size"
return start // part_size
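# Hedged worked example: with part_size = 4 MiB (4194304 bytes), the range
# string '4194304-8388607' covers bytes [4194304, 8388608), i.e. exactly one
# part starting on a part boundary, so this returns part index 1.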
def resume_archive_from_file(self, upload_id, filename=None,
file_obj=None):
"""Resume upload of a file already part-uploaded to Glacier.
The resumption of an upload where the part-uploaded section is empty
is a valid degenerate case that this function can handle.
One and only one of filename or file_obj must be specified.
:type upload_id: str
:param upload_id: existing Glacier upload id of upload being resumed.
:type filename: str
:param filename: file to open for resume
:type fobj: file
:param fobj: file-like object containing local data to resume. This
must read from the start of the entire upload, not just from the
point being resumed. Use fobj.seek(0) to achieve this if necessary.
:rtype: str
:return: The archive id of the newly created archive
"""
part_list_response = self.list_all_parts(upload_id)
part_size = part_list_response['PartSizeInBytes']
part_hash_map = {}
for part_desc in part_list_response['Parts']:
part_index = self._range_string_to_part_index(
part_desc['RangeInBytes'], part_size)
part_tree_hash = codecs.decode(part_desc['SHA256TreeHash'], 'hex_codec')
part_hash_map[part_index] = part_tree_hash
if not file_obj:
file_obj = open(filename, "rb")
return resume_file_upload(
self, upload_id, part_size, file_obj, part_hash_map)
def concurrent_create_archive_from_file(self, filename, description,
**kwargs):
"""
Create a new archive from a file and upload the given
file.
This is a convenience method around the
:class:`boto.glacier.concurrent.ConcurrentUploader`
class. This method will perform a multipart upload
and upload the parts of the file concurrently.
:type filename: str
:param filename: A filename to upload
:param kwargs: Additional kwargs to pass through to
:py:class:`boto.glacier.concurrent.ConcurrentUploader`.
You can pass any argument besides the ``api`` and
``vault_name`` param (these arguments are already
passed to the ``ConcurrentUploader`` for you).
:raises: `boto.glacier.exception.UploadArchiveError` is an error
occurs during the upload process.
:rtype: str
:return: The archive id of the newly created archive
"""
uploader = ConcurrentUploader(self.layer1, self.name, **kwargs)
archive_id = uploader.upload(filename, description)
return archive_id
def retrieve_archive(self, archive_id, sns_topic=None,
description=None):
"""
Initiate a archive retrieval job to download the data from an
archive. You will need to wait for the notification from
Amazon (via SNS) before you can actually download the data,
this takes around 4 hours.
:type archive_id: str
:param archive_id: The id of the archive
:type description: str
:param description: An optional description for the job.
:type sns_topic: str
:param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
sends notification when the job is completed and the output
is ready for you to download.
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the retrieval job.
"""
job_data = {'Type': 'archive-retrieval',
'ArchiveId': archive_id}
if sns_topic is not None:
job_data['SNSTopic'] = sns_topic
if description is not None:
job_data['Description'] = description
response = self.layer1.initiate_job(self.name, job_data)
return self.get_job(response['JobId'])
def retrieve_inventory(self, sns_topic=None,
description=None, byte_range=None,
start_date=None, end_date=None,
limit=None):
"""
Initiate a inventory retrieval job to list the items in the
vault. You will need to wait for the notification from
Amazon (via SNS) before you can actually download the data,
this takes around 4 hours.
:type description: str
:param description: An optional description for the job.
:type sns_topic: str
:param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
sends notification when the job is completed and the output
is ready for you to download.
:type byte_range: str
:param byte_range: Range of bytes to retrieve.
:type start_date: DateTime
:param start_date: Beginning of the date range to query.
:type end_date: DateTime
:param end_date: End of the date range to query.
:type limit: int
:param limit: Limits the number of results returned.
:rtype: str
:return: The ID of the job
"""
job_data = {'Type': 'inventory-retrieval'}
if sns_topic is not None:
job_data['SNSTopic'] = sns_topic
if description is not None:
job_data['Description'] = description
if byte_range is not None:
job_data['RetrievalByteRange'] = byte_range
if start_date is not None or end_date is not None or limit is not None:
rparams = {}
if start_date is not None:
rparams['StartDate'] = start_date.strftime('%Y-%m-%dT%H:%M:%S%Z')
if end_date is not None:
rparams['EndDate'] = end_date.strftime('%Y-%m-%dT%H:%M:%S%Z')
if limit is not None:
rparams['Limit'] = limit
job_data['InventoryRetrievalParameters'] = rparams
response = self.layer1.initiate_job(self.name, job_data)
return response['JobId']
def retrieve_inventory_job(self, **kwargs):
"""
Identical to ``retrieve_inventory``, but returns a ``Job`` instance
instead of just the job ID.
:type description: str
:param description: An optional description for the job.
:type sns_topic: str
:param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
sends notification when the job is completed and the output
is ready for you to download.
:type byte_range: str
:param byte_range: Range of bytes to retrieve.
:type start_date: DateTime
:param start_date: Beginning of the date range to query.
:type end_date: DateTime
:param end_date: End of the date range to query.
:type limit: int
:param limit: Limits the number of results returned.
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the retrieval job.
"""
job_id = self.retrieve_inventory(**kwargs)
return self.get_job(job_id)
def delete_archive(self, archive_id):
"""
This operation deletes an archive from the vault.
:type archive_id: str
:param archive_id: The ID for the archive to be deleted.
"""
return self.layer1.delete_archive(self.name, archive_id)
def get_job(self, job_id):
"""
Get an object representing a job in progress.
:type job_id: str
:param job_id: The ID of the job
:rtype: :class:`boto.glacier.job.Job`
:return: A Job object representing the job.
"""
response_data = self.layer1.describe_job(self.name, job_id)
return Job(self, response_data)
def list_jobs(self, completed=None, status_code=None):
"""
Return a list of Job objects related to this vault.
:type completed: boolean
:param completed: Specifies the state of the jobs to return.
If a value of True is passed, only completed jobs will
be returned. If a value of False is passed, only
uncompleted jobs will be returned. If no value is
passed, all jobs will be returned.
:type status_code: string
:param status_code: Specifies the type of job status to return.
Valid values are: InProgress|Succeeded|Failed. If not
specified, jobs with all status codes are returned.
:rtype: list of :class:`boto.glacier.job.Job`
:return: A list of Job objects related to this vault.
"""
response_data = self.layer1.list_jobs(self.name, completed,
status_code)
return [Job(self, jd) for jd in response_data['JobList']]
def list_all_parts(self, upload_id):
"""Automatically make and combine multiple calls to list_parts.
Call list_parts as necessary, combining the results in case multiple
calls were required to get data on all available parts.
"""
result = self.layer1.list_parts(self.name, upload_id)
marker = result['Marker']
while marker:
additional_result = self.layer1.list_parts(
self.name, upload_id, marker=marker)
result['Parts'].extend(additional_result['Parts'])
marker = additional_result['Marker']
# The marker makes no sense in an unpaginated result, and clearing it
# makes testing easier. This also has the nice property that the result
# is a normal (but expanded) response.
result['Marker'] = None
return result
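# Hedged usage sketch (vault name and file path are placeholders): a typical
# single-call upload through Layer2, which constructs Vault objects like the
# one defined above.
#
#   import boto.glacier
#   layer2 = boto.glacier.connect_to_region('us-east-1')
#   vault = layer2.get_vault('my-vault')
#   archive_id = vault.upload_archive('/tmp/backup.tar.gz',
#                                     description='nightly backup')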
|
mancoast/CPythonPyc_test | refs/heads/master | fail/341_test_nis.py | 88 | from test import support
import unittest
import sys
# Skip test if nis module does not exist.
nis = support.import_module('nis')
class NisTests(unittest.TestCase):
def test_maps(self):
try:
maps = nis.maps()
except nis.error as msg:
# NIS is probably not active, so this test isn't useful
self.skipTest(str(msg))
try:
# On some systems, this map is only accessible to the
# super user
maps.remove("passwd.adjunct.byname")
except ValueError:
pass
done = 0
for nismap in maps:
mapping = nis.cat(nismap)
for k, v in mapping.items():
if not k:
continue
if nis.match(k, nismap) != v:
self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
else:
# just test the one key, otherwise this test could take a
# very long time
done = 1
break
if done:
break
def test_main():
support.run_unittest(NisTests)
if __name__ == '__main__':
test_main()
|
britcey/ansible | refs/heads/devel | lib/ansible/modules/files/copy.py | 15 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: copy
version_added: "historical"
short_description: Copies files to remote locations.
description:
- The C(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box.
If you need variable interpolation in copied files, use the M(template) module.
options:
src:
description:
- Local path to a file to copy to the remote server; can be absolute or relative.
If path is a directory, it is copied recursively. In this case, if path ends
with "/", only inside contents of that directory are copied to destination.
Otherwise, if it does not end with "/", the directory itself with all contents
is copied. This behavior is similar to Rsync.
required: false
default: null
aliases: []
content:
version_added: "1.1"
description:
- When used instead of 'src', sets the contents of a file directly to the specified value.
This is for simple values, for anything complex or with formatting please switch to the template module.
required: false
default: null
dest:
description:
- Remote absolute path where the file should be copied to. If src is a directory,
this must be a directory too.
required: true
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
version_added: "0.7"
required: false
choices: [ "yes", "no" ]
default: "no"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
version_added: "1.1"
required: false
choices: [ "yes", "no" ]
default: "yes"
aliases: [ "thirsty" ]
directory_mode:
description:
- When doing a recursive copy set the mode for the directories. If this is not set we will use the system
defaults. The mode is only set on directories which are newly created, and will not affect those that
already existed.
required: false
version_added: "1.5"
remote_src:
description:
- If False, it will search for src at originating/master machine, if True it will go to the remote/target machine for the src. Default is False.
- Currently remote_src does not support recursive copying.
choices: [ "True", "False" ]
required: false
default: "False"
version_added: "2.0"
follow:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.8"
description:
- 'This flag indicates that filesystem links, if they exist, should be followed.'
extends_documentation_fragment:
- files
- validate
- decrypt
author:
- "Ansible Core Team"
- "Michael DeHaan"
notes:
- The "copy" module recursively copy facility does not scale to lots (>hundreds) of files.
For alternative, see synchronize module, which is a wrapper around rsync.
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: 0644
# The same example as above, but using a symbolic mode equivalent to 0644
- copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: "u=rw,g=r,o=r"
# Another symbolic mode example, adding some permissions and removing others
- copy:
src: /srv/myfiles/foo.conf
dest: /etc/foo.conf
owner: foo
group: foo
mode: "u+rw,g-wx,o-rwx"
# Copy a new "ntp.conf file into place, backing up the original if it differs from the copied version
- copy:
src: /mine/ntp.conf
dest: /etc/ntp.conf
owner: root
group: root
mode: 0644
backup: yes
# Copy a new "sudoers" file into place, after passing validation with visudo
- copy:
src: /mine/sudoers
dest: /etc/sudoers
validate: 'visudo -cf %s'
'''
RETURN = '''
dest:
description: destination file/path
returned: success
type: string
sample: "/path/to/file.txt"
src:
description: source file used for the copy on the target machine
returned: changed
type: string
sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source"
md5sum:
description: md5 checksum of the file after running copy
returned: when supported
type: string
sample: "2a5aeecc61dc98c4d780b14b330e3282"
checksum:
description: sha1 checksum of the file after running copy
returned: success
type: string
sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827"
backup_file:
description: name of backup file created
returned: changed and if backup=yes
type: string
sample: "/path/to/file.txt.2015-02-12@22:09~"
gid:
description: group id of the file, after execution
returned: success
type: int
sample: 100
group:
description: group of the file, after execution
returned: success
type: string
sample: "httpd"
owner:
description: owner of the file, after execution
returned: success
type: string
sample: "httpd"
uid:
description: owner id of the file, after execution
returned: success
type: int
sample: 100
mode:
description: permissions of the target, after execution
returned: success
type: string
sample: "0644"
size:
description: size of the target, after execution
returned: success
type: int
sample: 1220
state:
description: state of the target, after execution
returned: success
type: string
sample: "file"
'''
import os
import shutil
import tempfile
import traceback
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_bytes, to_native
def split_pre_existing_dir(dirname):
'''
Return the first pre-existing directory and a list of the new directories that will be created.
'''
head, tail = os.path.split(dirname)
b_head = to_bytes(head, errors='surrogate_or_strict')
if not os.path.exists(b_head):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(head)
else:
return (head, [tail])
new_directory_list.append(tail)
return (pre_existing_dir, new_directory_list)
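# Illustrative walk-through (hypothetical paths, not part of the original
# module): if only /opt exists on disk, split_pre_existing_dir('/opt/app/conf')
# recurses on '/opt/app' and returns ('/opt', ['app', 'conf']): the deepest
# pre-existing directory plus the components that still need to be created.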
def adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed):
'''
Walk the new directories list and make sure that permissions are as we would expect
'''
if len(new_directory_list) > 0:
working_dir = os.path.join(pre_existing_dir, new_directory_list.pop(0))
directory_args['path'] = working_dir
changed = module.set_fs_attributes_if_different(directory_args, changed)
changed = adjust_recursive_directory_permissions(working_dir, new_directory_list, module, directory_args, changed)
return changed
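# Illustrative call (hypothetical values): continuing the example above, the
# walk applies directory_args to /opt/app and then /opt/app/conf in turn,
# threading the 'changed' flag through each step:
#     adjust_recursive_directory_permissions('/opt', ['app', 'conf'],
#                                            module, directory_args, False)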
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
src = dict(required=False, type='path'),
original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack
content = dict(required=False, no_log=True),
dest = dict(required=True, type='path'),
backup = dict(default=False, type='bool'),
force = dict(default=True, aliases=['thirsty'], type='bool'),
validate = dict(required=False, type='str'),
directory_mode = dict(required=False, type='raw'),
remote_src = dict(required=False, type='bool'),
),
add_file_common_args=True,
supports_check_mode=True,
)
src = module.params['src']
b_src = to_bytes(src, errors='surrogate_or_strict')
dest = module.params['dest']
b_dest = to_bytes(dest, errors='surrogate_or_strict')
backup = module.params['backup']
force = module.params['force']
original_basename = module.params.get('original_basename', None)
validate = module.params.get('validate', None)
follow = module.params['follow']
mode = module.params['mode']
remote_src = module.params['remote_src']
if not os.path.exists(b_src):
module.fail_json(msg="Source %s not found" % (src))
if not os.access(b_src, os.R_OK):
module.fail_json(msg="Source %s not readable" % (src))
if os.path.isdir(b_src):
module.fail_json(msg="Remote copy does not support recursive copy of directory: %s" % (src))
checksum_src = module.sha1(src)
checksum_dest = None
# Backwards compat only. This will be None in FIPS mode
try:
md5sum_src = module.md5(src)
except ValueError:
md5sum_src = None
changed = False
# Special handling for recursive copy - create intermediate dirs
if original_basename and dest.endswith(os.sep):
dest = os.path.join(dest, original_basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
dirname = os.path.dirname(dest)
b_dirname = to_bytes(dirname, errors='surrogate_or_strict')
if not os.path.exists(b_dirname) and os.path.isabs(b_dirname):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
os.makedirs(b_dirname)
directory_args = module.load_file_common_arguments(module.params)
directory_mode = module.params["directory_mode"]
if directory_mode is not None:
directory_args['mode'] = directory_mode
else:
directory_args['mode'] = None
adjust_recursive_directory_permissions(pre_existing_dir, new_directory_list, module, directory_args, changed)
if os.path.isdir(b_dest):
basename = os.path.basename(src)
if original_basename:
basename = original_basename
dest = os.path.join(dest, basename)
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
if os.path.islink(b_dest) and follow:
b_dest = os.path.realpath(b_dest)
dest = to_native(b_dest, errors='surrogate_or_strict')
if not force:
module.exit_json(msg="file already exists", src=src, dest=dest, changed=False)
if os.access(b_dest, os.R_OK):
checksum_dest = module.sha1(dest)
else:
if not os.path.exists(os.path.dirname(b_dest)):
try:
# os.path.exists() can return false in some
# circumstances where the directory does not have
# the execute bit for the current user set, in
# which case the stat() call will raise an OSError
os.stat(os.path.dirname(b_dest))
except OSError:
e = get_exception()
if "permission denied" in to_native(e).lower():
module.fail_json(msg="Destination directory %s is not accessible" % (os.path.dirname(dest)))
module.fail_json(msg="Destination directory %s does not exist" % (os.path.dirname(dest)))
if not os.access(os.path.dirname(b_dest), os.W_OK):
module.fail_json(msg="Destination %s not writable" % (os.path.dirname(dest)))
backup_file = None
if checksum_src != checksum_dest or os.path.islink(b_dest):
if not module.check_mode:
try:
if backup:
if os.path.exists(b_dest):
backup_file = module.backup_local(dest)
# allow for conversion from symlink.
if os.path.islink(b_dest):
os.unlink(b_dest)
open(b_dest, 'w').close()
if validate:
# if we have a mode, make sure we set it on the temporary
# file source as some validations may require it
# FIXME: should we do the same for owner/group here too?
if mode is not None:
module.set_mode_if_different(src, mode, False)
if "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % (validate))
(rc, out, err) = module.run_command(validate % src)
if rc != 0:
module.fail_json(msg="failed to validate", exit_status=rc, stdout=out, stderr=err)
b_mysrc = b_src
if remote_src:
_, b_mysrc = tempfile.mkstemp(dir=os.path.dirname(b_dest))
shutil.copy2(b_src, b_mysrc)
module.atomic_move(b_mysrc, dest, unsafe_writes=module.params['unsafe_writes'])
except IOError:
module.fail_json(msg="failed to copy: %s to %s" % (src, dest), traceback=traceback.format_exc())
changed = True
else:
changed = False
res_args = dict(
dest=dest, src=src, md5sum=md5sum_src, checksum=checksum_src, changed=changed
)
if backup_file:
res_args['backup_file'] = backup_file
module.params['dest'] = dest
if not module.check_mode:
file_args = module.load_file_common_arguments(module.params)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args)
if __name__ == '__main__':
main()
|
Mause/pytransperth | refs/heads/master | docs/source/quickstart.py | 1 | import os
import sys
sys.path.insert(
0,
os.path.join(os.path.dirname(__file__), '..', '..')
)
# create the Location objects you wish to resolve:
from transperth.jp.location import (
Location,
ResolvedLocation,
determine_location
)
from_location = Location.from_location('Curtin University, Perth')
to_location = Location.from_location('Arena Joondalup')
# then we resolve it into something that the transperth api will accept
locations = determine_location(from_location, to_location)
# determine_location will return a dictionary like so:
#
# {
#     '<DIRECTION>': [
#         ResolvedLocation('<NAME>', '<CODE>'),
#         # etc
#     ]
# }
# it would be reasonable to assume the first result is correct,
# or to let the end user choose from a list
from_location = locations['from'][0]
to_location = locations['to'][0]
# once we have these, we can grab the routes
from transperth.jp.routes import determine_routes
routes = determine_routes(from_location, to_location)
# take your pick of the routes
route = routes[0]
# and use 'em how you like
from transperth.smart_rider.trips import timedelta_repr
print(timedelta_repr(route['meta']['duration']))
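
# A hedged extension of the example above (assuming every route carries the
# same ['meta']['duration'] structure): compare all returned routes instead of
# just the first.
for index, candidate in enumerate(routes):
    print(index, timedelta_repr(candidate['meta']['duration']))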
|
efortuna/AndroidSDKClone | refs/heads/master | ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/test/test_types.py | 113 | # Python test set -- part 6, built-in types
from test.test_support import run_unittest, have_unicode, run_with_locale, \
check_py3k_warnings
import unittest
import sys
import locale
class TypesTests(unittest.TestCase):
def test_truth_values(self):
if None: self.fail('None is true instead of false')
if 0: self.fail('0 is true instead of false')
if 0L: self.fail('0L is true instead of false')
if 0.0: self.fail('0.0 is true instead of false')
if '': self.fail('\'\' is true instead of false')
if not 1: self.fail('1 is false instead of true')
if not 1L: self.fail('1L is false instead of true')
if not 1.0: self.fail('1.0 is false instead of true')
if not 'x': self.fail('\'x\' is false instead of true')
if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true')
def f(): pass
class C: pass
x = C()
if not f: self.fail('f is false instead of true')
if not C: self.fail('C is false instead of true')
if not sys: self.fail('sys is false instead of true')
if not x: self.fail('x is false instead of true')
def test_boolean_ops(self):
if 0 or 0: self.fail('0 or 0 is true instead of false')
if 1 and 1: pass
else: self.fail('1 and 1 is false instead of true')
if not 1: self.fail('not 1 is true instead of false')
def test_comparisons(self):
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: self.fail('int comparisons failed')
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: self.fail('long int comparisons failed')
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: self.fail('float comparisons failed')
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: self.fail('string comparisons failed')
if None is None: pass
else: self.fail('identity test failed')
def test_float_constructor(self):
self.assertRaises(ValueError, float, '')
self.assertRaises(ValueError, float, '5\0')
def test_zero_division(self):
try: 5.0 / 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError")
try: 5.0 // 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError")
try: 5.0 % 0.0
except ZeroDivisionError: pass
else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError")
try: 5 / 0L
except ZeroDivisionError: pass
else: self.fail("5 / 0L didn't raise ZeroDivisionError")
try: 5 // 0L
except ZeroDivisionError: pass
else: self.fail("5 // 0L didn't raise ZeroDivisionError")
try: 5 % 0L
except ZeroDivisionError: pass
else: self.fail("5 % 0L didn't raise ZeroDivisionError")
def test_numeric_types(self):
if 0 != 0L or 0 != 0.0 or 0L != 0.0: self.fail('mixed comparisons')
if 1 != 1L or 1 != 1.0 or 1L != 1.0: self.fail('mixed comparisons')
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
self.fail('int/long/float value not equal')
# calling built-in types without argument must return 0
if int() != 0: self.fail('int() does not return 0')
if long() != 0L: self.fail('long() does not return 0L')
if float() != 0.0: self.fail('float() does not return 0.0')
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: self.fail('int() does not round properly')
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: self.fail('long() does not round properly')
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: self.fail('float() does not work properly')
def test_float_to_string(self):
def test(f, result):
self.assertEqual(f.__format__('e'), result)
self.assertEqual('%e' % f, result)
# test all 2 digit exponents, both with __format__ and with
# '%' formatting
for i in range(-99, 100):
test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i))
# test some 3 digit exponents
self.assertEqual(1.5e100.__format__('e'), '1.500000e+100')
self.assertEqual('%e' % 1.5e100, '1.500000e+100')
self.assertEqual(1.5e101.__format__('e'), '1.500000e+101')
self.assertEqual('%e' % 1.5e101, '1.500000e+101')
self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100')
self.assertEqual('%e' % 1.5e-100, '1.500000e-100')
self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101')
self.assertEqual('%e' % 1.5e-101, '1.500000e-101')
self.assertEqual('%g' % 1.0, '1')
self.assertEqual('%#g' % 1.0, '1.00000')
def test_normal_integers(self):
# Ensure the first 256 integers are shared
a = 256
b = 128*2
if a is not b: self.fail('256 is not shared')
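        # (CPython interns small integers in the range [-5, 256], so literals
        # and computed values in that range refer to the same object, which is
        # exactly what the identity check above relies on.)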
if 12 + 24 != 36: self.fail('int op')
if 12 + (-24) != -12: self.fail('int op')
if (-12) + 24 != 12: self.fail('int op')
if (-12) + (-24) != -36: self.fail('int op')
if not 12 < 24: self.fail('int op')
if not -24 < -12: self.fail('int op')
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
self.fail('int mul commutativity')
# And another.
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
self.fail("%r * %r == %r != %r" % (divisor, j, prod, m))
if type(prod) is not int:
self.fail("expected type(prod) to be int, not %r" %
type(prod))
# Check for expected * overflow to long.
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
self.fail("expected type(%r) to be long, not %r" %
(prod, type(prod)))
# Check for expected * overflow to long.
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
self.fail("expected type(%r) to be long, not %r" %
(prod, type(prod)))
def test_long_integers(self):
if 12L + 24L != 36L: self.fail('long op')
if 12L + (-24L) != -12L: self.fail('long op')
if (-12L) + 24L != 12L: self.fail('long op')
if (-12L) + (-24L) != -36L: self.fail('long op')
if not 12L < 24L: self.fail('long op')
if not -24L < -12L: self.fail('long op')
x = sys.maxint
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)+1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
x = -x
if int(long(x)) != x: self.fail('long op')
x = x-1
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)-1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
try: 5 << -5
except ValueError: pass
else: self.fail('int negative shift <<')
try: 5L << -5L
except ValueError: pass
else: self.fail('long negative shift <<')
try: 5 >> -5
except ValueError: pass
else: self.fail('int negative shift >>')
try: 5L >> -5L
except ValueError: pass
else: self.fail('long negative shift >>')
def test_floats(self):
if 12.0 + 24.0 != 36.0: self.fail('float op')
if 12.0 + (-24.0) != -12.0: self.fail('float op')
if (-12.0) + 24.0 != 12.0: self.fail('float op')
if (-12.0) + (-24.0) != -36.0: self.fail('float op')
if not 12.0 < 24.0: self.fail('float op')
if not -24.0 < -12.0: self.fail('float op')
def test_strings(self):
if len('') != 0: self.fail('len(\'\')')
if len('a') != 1: self.fail('len(\'a\')')
if len('abcdef') != 6: self.fail('len(\'abcdef\')')
if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation')
if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3')
if 0*'abcde' != '': self.fail('string repetition 0*')
if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string')
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: self.fail('in/not in string')
x = 'x'*103
if '%s!'%x != x+'!': self.fail('nasty string formatting bug')
#extended slices for strings
a = '0123456789'
self.assertEqual(a[::], a)
self.assertEqual(a[::2], '02468')
self.assertEqual(a[1::2], '13579')
self.assertEqual(a[::-1],'9876543210')
self.assertEqual(a[::-2], '97531')
self.assertEqual(a[3::-2], '31')
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], '02468')
if have_unicode:
a = unicode('0123456789', 'ascii')
self.assertEqual(a[::], a)
self.assertEqual(a[::2], unicode('02468', 'ascii'))
self.assertEqual(a[1::2], unicode('13579', 'ascii'))
self.assertEqual(a[::-1], unicode('9876543210', 'ascii'))
self.assertEqual(a[::-2], unicode('97531', 'ascii'))
self.assertEqual(a[3::-2], unicode('31', 'ascii'))
self.assertEqual(a[-100:100:], a)
self.assertEqual(a[100:-100:-1], a[::-1])
self.assertEqual(a[-100L:100L:2L], unicode('02468', 'ascii'))
def test_type_function(self):
self.assertRaises(TypeError, type, 1, 2)
self.assertRaises(TypeError, type, 1, 2, 3, 4)
def test_buffers(self):
self.assertRaises(ValueError, buffer, 'asdf', -1)
cmp(buffer("abc"), buffer("def")) # used to raise a warning: tp_compare didn't return -1, 0, or 1
self.assertRaises(TypeError, buffer, None)
a = buffer('asdf')
hash(a)
b = a * 5
if a == b:
self.fail('buffers should not be equal')
if str(b) != ('asdf' * 5):
self.fail('repeated buffer has wrong content')
if str(a * 0) != '':
self.fail('repeated buffer zero times has wrong content')
if str(a + buffer('def')) != 'asdfdef':
self.fail('concatenation of buffers yields wrong content')
if str(buffer(a)) != 'asdf':
self.fail('composing buffers failed')
if str(buffer(a, 2)) != 'df':
self.fail('specifying buffer offset failed')
if str(buffer(a, 0, 2)) != 'as':
self.fail('specifying buffer size failed')
if str(buffer(a, 1, 2)) != 'sd':
self.fail('specifying buffer offset and size failed')
self.assertRaises(ValueError, buffer, buffer('asdf', 1), -1)
if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
self.fail('composing length-specified buffer failed')
try: a[1] = 'g'
except TypeError: pass
else: self.fail("buffer assignment should raise TypeError")
try: a[0:1] = 'g'
except TypeError: pass
else: self.fail("buffer slice assignment should raise TypeError")
# array.array() returns an object that does not implement a char buffer,
# something which int() uses for conversion.
import array
try: int(buffer(array.array('c')))
except TypeError: pass
else: self.fail("char buffer (at C level) not working")
def test_int__format__(self):
def test(i, format_spec, result):
# just make sure I'm not accidentally checking longs
assert type(i) == int
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
self.assertEqual(i.__format__(unicode(format_spec)), result)
test(123456789, 'd', '123456789')
test(123456789, 'd', '123456789')
test(1, 'c', '\01')
# sign and aligning are interdependent
test(1, "-", '1')
test(-1, "-", '-1')
test(1, "-3", ' 1')
test(-1, "-3", ' -1')
test(1, "+3", ' +1')
test(-1, "+3", ' -1')
test(1, " 3", ' 1')
test(-1, " 3", ' -1')
test(1, " ", ' 1')
test(-1, " ", '-1')
# hex
test(3, "x", "3")
test(3, "X", "3")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(1234, "8x", " 4d2")
test(-1234, "8x", " -4d2")
test(1234, "x", "4d2")
test(-1234, "x", "-4d2")
test(-3, "x", "-3")
test(-3, "X", "-3")
test(int('be', 16), "x", "be")
test(int('be', 16), "X", "BE")
test(-int('be', 16), "x", "-be")
test(-int('be', 16), "X", "-BE")
# octal
test(3, "o", "3")
test(-3, "o", "-3")
test(65, "o", "101")
test(-65, "o", "-101")
test(1234, "o", "2322")
test(-1234, "o", "-2322")
test(1234, "-o", "2322")
test(-1234, "-o", "-2322")
test(1234, " o", " 2322")
test(-1234, " o", "-2322")
test(1234, "+o", "+2322")
test(-1234, "+o", "-2322")
# binary
test(3, "b", "11")
test(-3, "b", "-11")
test(1234, "b", "10011010010")
test(-1234, "b", "-10011010010")
test(1234, "-b", "10011010010")
test(-1234, "-b", "-10011010010")
test(1234, " b", " 10011010010")
test(-1234, " b", "-10011010010")
test(1234, "+b", "+10011010010")
test(-1234, "+b", "-10011010010")
# alternate (#) formatting
test(0, "#b", '0b0')
test(0, "-#b", '0b0')
test(1, "-#b", '0b1')
test(-1, "-#b", '-0b1')
test(-1, "-#5b", ' -0b1')
test(1, "+#5b", ' +0b1')
test(100, "+#b", '+0b1100100')
test(100, "#012b", '0b0001100100')
test(-100, "#012b", '-0b001100100')
test(0, "#o", '0o0')
test(0, "-#o", '0o0')
test(1, "-#o", '0o1')
test(-1, "-#o", '-0o1')
test(-1, "-#5o", ' -0o1')
test(1, "+#5o", ' +0o1')
test(100, "+#o", '+0o144')
test(100, "#012o", '0o0000000144')
test(-100, "#012o", '-0o000000144')
test(0, "#x", '0x0')
test(0, "-#x", '0x0')
test(1, "-#x", '0x1')
test(-1, "-#x", '-0x1')
test(-1, "-#5x", ' -0x1')
test(1, "+#5x", ' +0x1')
test(100, "+#x", '+0x64')
test(100, "#012x", '0x0000000064')
test(-100, "#012x", '-0x000000064')
test(123456, "#012x", '0x000001e240')
test(-123456, "#012x", '-0x00001e240')
test(0, "#X", '0X0')
test(0, "-#X", '0X0')
test(1, "-#X", '0X1')
test(-1, "-#X", '-0X1')
test(-1, "-#5X", ' -0X1')
test(1, "+#5X", ' +0X1')
test(100, "+#X", '+0X64')
test(100, "#012X", '0X0000000064')
test(-100, "#012X", '-0X000000064')
test(123456, "#012X", '0X000001E240')
test(-123456, "#012X", '-0X00001E240')
# issue 5782, commas with no specifier type
test(1234, '010,', '00,001,234')
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3 .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3 .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3 .__format__, None)
self.assertRaises(TypeError, 3 .__format__, 0)
# can't have ',' with 'c'
self.assertRaises(ValueError, 3 .__format__, ",c")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0 .__format__, format_spec)
self.assertRaises(ValueError, 1 .__format__, format_spec)
self.assertRaises(ValueError, (-1) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the int to a float
for format_spec in 'eEfFgG%':
for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456, "0<20", '12345600000000000000')
test(123456, "1<20", '12345611111111111111')
test(123456, "*<20", '123456**************')
test(123456, "0>20", '00000000000000123456')
test(123456, "1>20", '11111111111111123456')
test(123456, "*>20", '**************123456')
test(123456, "0=20", '00000000000000123456')
test(123456, "1=20", '11111111111111123456')
test(123456, "*=20", '**************123456')
def test_long__format__(self):
def test(i, format_spec, result):
# make sure we're not accidentally checking ints
assert type(i) == long
assert type(format_spec) == str
self.assertEqual(i.__format__(format_spec), result)
self.assertEqual(i.__format__(unicode(format_spec)), result)
test(10**100, 'd', '1' + '0' * 100)
test(10**100+100, 'd', '1' + '0' * 97 + '100')
test(123456789L, 'd', '123456789')
test(123456789L, 'd', '123456789')
# sign and aligning are interdependent
test(1L, "-", '1')
test(-1L, "-", '-1')
test(1L, "-3", ' 1')
test(-1L, "-3", ' -1')
test(1L, "+3", ' +1')
test(-1L, "+3", ' -1')
test(1L, " 3", ' 1')
test(-1L, " 3", ' -1')
test(1L, " ", ' 1')
test(-1L, " ", '-1')
test(1L, 'c', '\01')
# hex
test(3L, "x", "3")
test(3L, "X", "3")
test(1234L, "x", "4d2")
test(-1234L, "x", "-4d2")
test(1234L, "8x", " 4d2")
test(-1234L, "8x", " -4d2")
test(1234L, "x", "4d2")
test(-1234L, "x", "-4d2")
test(-3L, "x", "-3")
test(-3L, "X", "-3")
test(long('be', 16), "x", "be")
test(long('be', 16), "X", "BE")
test(-long('be', 16), "x", "-be")
test(-long('be', 16), "X", "-BE")
# octal
test(3L, "o", "3")
test(-3L, "o", "-3")
test(65L, "o", "101")
test(-65L, "o", "-101")
test(1234L, "o", "2322")
test(-1234L, "o", "-2322")
test(1234L, "-o", "2322")
test(-1234L, "-o", "-2322")
test(1234L, " o", " 2322")
test(-1234L, " o", "-2322")
test(1234L, "+o", "+2322")
test(-1234L, "+o", "-2322")
# binary
test(3L, "b", "11")
test(-3L, "b", "-11")
test(1234L, "b", "10011010010")
test(-1234L, "b", "-10011010010")
test(1234L, "-b", "10011010010")
test(-1234L, "-b", "-10011010010")
test(1234L, " b", " 10011010010")
test(-1234L, " b", "-10011010010")
test(1234L, "+b", "+10011010010")
test(-1234L, "+b", "-10011010010")
# make sure these are errors
# precision disallowed
self.assertRaises(ValueError, 3L .__format__, "1.3")
# sign not allowed with 'c'
self.assertRaises(ValueError, 3L .__format__, "+c")
# format spec must be string
self.assertRaises(TypeError, 3L .__format__, None)
self.assertRaises(TypeError, 3L .__format__, 0)
# alternate specifier in wrong place
self.assertRaises(ValueError, 1L .__format__, "#+5x")
self.assertRaises(ValueError, 1L .__format__, "+5#x")
# ensure that only int and float type specifiers work
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'bcdoxXeEfFgGn%':
self.assertRaises(ValueError, 0L .__format__, format_spec)
self.assertRaises(ValueError, 1L .__format__, format_spec)
self.assertRaises(ValueError, (-1L) .__format__, format_spec)
# ensure that float type specifiers work; format converts
# the long to a float
for format_spec in 'eEfFgG%':
for value in [0L, 1L, -1L, 100L, -100L, 1234567890L, -1234567890L]:
self.assertEqual(value.__format__(format_spec),
float(value).__format__(format_spec))
# Issue 6902
test(123456L, "0<20", '12345600000000000000')
test(123456L, "1<20", '12345611111111111111')
test(123456L, "*<20", '123456**************')
test(123456L, "0>20", '00000000000000123456')
test(123456L, "1>20", '11111111111111123456')
test(123456L, "*>20", '**************123456')
test(123456L, "0=20", '00000000000000123456')
test(123456L, "1=20", '11111111111111123456')
test(123456L, "*=20", '**************123456')
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_float__format__locale(self):
# test locale support for __format__ code 'n'
for i in range(-10, 10):
x = 1234567890.0 * (10.0 ** i)
self.assertEqual(locale.format('%g', x, grouping=True), format(x, 'n'))
self.assertEqual(locale.format('%.10g', x, grouping=True), format(x, '.10n'))
@run_with_locale('LC_NUMERIC', 'en_US.UTF8')
def test_int__format__locale(self):
# test locale support for __format__ code 'n' for integers
x = 123456789012345678901234567890
for i in range(0, 30):
self.assertEqual(locale.format('%d', x, grouping=True), format(x, 'n'))
# move to the next integer to test
x = x // 10
rfmt = ">20n"
lfmt = "<20n"
cfmt = "^20n"
for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900):
self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt)))
self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt)))
self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt)))
def test_float__format__(self):
# these should be rewritten to use both format(x, spec) and
# x.__format__(spec)
def test(f, format_spec, result):
assert type(f) == float
assert type(format_spec) == str
self.assertEqual(f.__format__(format_spec), result)
self.assertEqual(f.__format__(unicode(format_spec)), result)
test(0.0, 'f', '0.000000')
# the default is 'g', except for empty format spec
test(0.0, '', '0.0')
test(0.01, '', '0.01')
test(0.01, 'g', '0.01')
# test for issue 3411
test(1.23, '1', '1.23')
test(-1.23, '1', '-1.23')
test(1.23, '1g', '1.23')
test(-1.23, '1g', '-1.23')
test( 1.0, ' g', ' 1')
test(-1.0, ' g', '-1')
test( 1.0, '+g', '+1')
test(-1.0, '+g', '-1')
test(1.1234e200, 'g', '1.1234e+200')
test(1.1234e200, 'G', '1.1234E+200')
test(1.0, 'f', '1.000000')
test(-1.0, 'f', '-1.000000')
test( 1.0, ' f', ' 1.000000')
test(-1.0, ' f', '-1.000000')
test( 1.0, '+f', '+1.000000')
test(-1.0, '+f', '-1.000000')
# Python versions <= 2.6 switched from 'f' to 'g' formatting for
# values larger than 1e50. No longer.
f = 1.1234e90
for fmt in 'f', 'F':
# don't do a direct equality check, since on some
# platforms only the first few digits of dtoa
# will be reliable
result = f.__format__(fmt)
self.assertEqual(len(result), 98)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
f = 1.1234e200
for fmt in 'f', 'F':
result = f.__format__(fmt)
self.assertEqual(len(result), 208)
self.assertEqual(result[-7], '.')
self.assertIn(result[:12], ('112340000000', '112339999999'))
test( 1.0, 'e', '1.000000e+00')
test(-1.0, 'e', '-1.000000e+00')
test( 1.0, 'E', '1.000000E+00')
test(-1.0, 'E', '-1.000000E+00')
test(1.1234e20, 'e', '1.123400e+20')
test(1.1234e20, 'E', '1.123400E+20')
# No format code means use g, but must have a decimal
# and a number after the decimal. This is tricky, because
        # a totally empty format specifier means something else.
# So, just use a sign flag
test(1e200, '+g', '+1e+200')
test(1e200, '+', '+1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
test(1.1e200, '+g', '+1.1e+200')
test(1.1e200, '+', '+1.1e+200')
# 0 padding
test(1234., '010f', '1234.000000')
test(1234., '011f', '1234.000000')
test(1234., '012f', '01234.000000')
test(-1234., '011f', '-1234.000000')
test(-1234., '012f', '-1234.000000')
test(-1234., '013f', '-01234.000000')
test(-1234.12341234, '013f', '-01234.123412')
test(-123456.12341234, '011.2f', '-0123456.12')
# issue 5782, commas with no specifier type
test(1.2, '010,.2', '0,000,001.2')
# 0 padding with commas
test(1234., '011,f', '1,234.000000')
test(1234., '012,f', '1,234.000000')
test(1234., '013,f', '01,234.000000')
test(-1234., '012,f', '-1,234.000000')
test(-1234., '013,f', '-1,234.000000')
test(-1234., '014,f', '-01,234.000000')
test(-12345., '015,f', '-012,345.000000')
test(-123456., '016,f', '-0,123,456.000000')
test(-123456., '017,f', '-0,123,456.000000')
test(-123456.12341234, '017,f', '-0,123,456.123412')
test(-123456.12341234, '013,.2f', '-0,123,456.12')
# % formatting
test(-1.0, '%', '-100.000000%')
# format spec must be string
self.assertRaises(TypeError, 3.0.__format__, None)
self.assertRaises(TypeError, 3.0.__format__, 0)
# other format specifiers shouldn't work on floats,
# in particular int specifiers
for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
[chr(x) for x in range(ord('A'), ord('Z')+1)]):
if not format_spec in 'eEfFgGn%':
self.assertRaises(ValueError, format, 0.0, format_spec)
self.assertRaises(ValueError, format, 1.0, format_spec)
self.assertRaises(ValueError, format, -1.0, format_spec)
self.assertRaises(ValueError, format, 1e100, format_spec)
self.assertRaises(ValueError, format, -1e100, format_spec)
self.assertRaises(ValueError, format, 1e-100, format_spec)
self.assertRaises(ValueError, format, -1e-100, format_spec)
# Alternate formatting is not supported
self.assertRaises(ValueError, format, 0.0, '#')
self.assertRaises(ValueError, format, 0.0, '#20f')
# Issue 6902
test(12345.6, "0<20", '12345.60000000000000')
test(12345.6, "1<20", '12345.61111111111111')
test(12345.6, "*<20", '12345.6*************')
test(12345.6, "0>20", '000000000000012345.6')
test(12345.6, "1>20", '111111111111112345.6')
test(12345.6, "*>20", '*************12345.6')
test(12345.6, "0=20", '000000000000012345.6')
test(12345.6, "1=20", '111111111111112345.6')
test(12345.6, "*=20", '*************12345.6')
def test_format_spec_errors(self):
# int, float, and string all share the same format spec
# mini-language parser.
# Check that we can't ask for too many digits. This is
# probably a CPython specific test. It tries to put the width
# into a C long.
self.assertRaises(ValueError, format, 0, '1'*10000 + 'd')
# Similar with the precision.
self.assertRaises(ValueError, format, 0, '.' + '1'*10000 + 'd')
# And may as well test both.
self.assertRaises(ValueError, format, 0, '1'*1000 + '.' + '1'*10000 + 'd')
# Make sure commas aren't allowed with various type codes
for code in 'xXobns':
self.assertRaises(ValueError, format, 0, ',' + code)
def test_internal_sizes(self):
self.assertGreater(object.__basicsize__, 0)
self.assertGreater(tuple.__itemsize__, 0)
def test_main():
with check_py3k_warnings(
("buffer.. not supported", DeprecationWarning),
("classic long division", DeprecationWarning)):
run_unittest(TypesTests)
if __name__ == '__main__':
test_main()
|
untitaker/mysteryshack | refs/heads/master | scripts/make_staticfiles.py | 1 | #!/usr/bin/python
import mimetypes
import os
static_path = './src/static/'
output_file = "./src/mysteryshack/web/staticfiles.rs"
f = open(output_file, "w")
def w(s=''):
if s:
f.write(s)
f.write('\n')
w("// Generated using scripts/make_staticfiles.py. Do NOT edit directly!")
w()
w('use router::Router;')
w('use hyper::header;')
w('use iron::prelude::*;')
w('use iron::modifiers::Header;')
w('use iron::status;')
w()
w('pub fn get_static_handler() -> Router {')
w(' let mut r = Router::new();')
def recurse_files(p):
for dirpath, dirnames, filenames in os.walk(p):
for filename in filenames:
yield os.path.join(dirpath, filename)
for abs_filepath in sorted(recurse_files(static_path)):
mimetype, encoding = mimetypes.guess_type(abs_filepath)
contenttype = mimetype
    if encoding:
        # guess_type() reports a content encoding (e.g. 'gzip') here; append it
        # with the standard ';' parameter separator so the generated header
        # value stays parseable
        contenttype += '; charset=' + encoding
w(' r.get("/{route}", (|_: &mut Request|\n'
' Ok(Response::with((\n'
' status::Ok,\n'
' Header(header::ContentType("{contenttype}".parse().unwrap())),\n'
' &include_bytes!("{filepath}")[..]\n'
' )))), "{fname}"\n'
' );'
.format(
route=os.path.relpath(abs_filepath, static_path),
fname=os.path.basename(abs_filepath),
contenttype=contenttype,
filepath=os.path.relpath(abs_filepath, os.path.dirname(output_file))
))
w(' r')
w('}')
f.close()
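
# Usage sketch (assumptions: run from the repository root so the relative
# static_path and output_file above resolve; the Rust build then picks up the
# regenerated staticfiles.rs):
#     python scripts/make_staticfiles.py
#     cargo build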
|
adrian/feed2me | refs/heads/master | fix_path.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import sys
# credit: Nick Johnson of Google
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
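
# Usage sketch (illustrative; the vendored package name is hypothetical):
#     import fix_path  # noqa: F401 - imported for its sys.path side effect
#     import some_vendored_lib  # would now resolve from ./lib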
|
2014c2g23/2015cda-w17 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/dummy/__init__.py | 693 | #
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
]
#
# Imports
#
import threading
import sys
import weakref
#brython fix me
#import array
from multiprocessing.dummy.connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue
#
#
#
class DummyProcess(threading.Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._pid = None
self._children = weakref.WeakKeyDictionary()
self._start_called = False
self._parent = current_process()
def start(self):
assert self._parent is current_process()
self._start_called = True
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
threading.Thread.start(self)
@property
def exitcode(self):
if self._start_called and not self.is_alive():
return 0
else:
return None
#
#
#
Process = DummyProcess
current_process = threading.current_thread
current_process()._children = weakref.WeakKeyDictionary()
def active_children():
children = current_process()._children
for p in list(children):
if not p.is_alive():
children.pop(p, None)
return list(children)
def freeze_support():
pass
#
#
#
class Namespace(object):
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return 'Namespace(%s)' % str.join(', ', temp)
dict = dict
list = list
#brython fix me
#def Array(typecode, sequence, lock=True):
# return array.array(typecode, sequence)
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def _get(self):
return self._value
def _set(self, value):
self._value = value
value = property(_get, _set)
def __repr__(self):
return '<%r(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
def Manager():
return sys.modules[__name__]
def shutdown():
pass
def Pool(processes=None, initializer=None, initargs=()):
from multiprocessing.pool import ThreadPool
return ThreadPool(processes, initializer, initargs)
JoinableQueue = Queue
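# Minimal usage sketch (illustrative, not part of the original module): the
# dummy package provides thread-backed workers behind the multiprocessing API.
#
#     from multiprocessing.dummy import Pool
#     pool = Pool(2)
#     print(pool.map(str.upper, ['a', 'b']))  # -> ['A', 'B']
#     pool.close(); pool.join()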
|
40223117cda/2015_w11 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/logging/__init__.py | 733 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, io, traceback, warnings, weakref
from string import Template
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning',
'getLogRecordFactory', 'setLogRecordFactory', 'lastResort']
try:
import threading
except ImportError: #pragma: no cover
threading = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
_srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
else:
_srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
if hasattr(sys, '_getframe'):
currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True
#
# If you don't want threading information in the log, set this to zero
#
logThreads = True
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = True
#
# If you don't want process information in the log, set this to zero
#
logProcesses = True
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
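# Illustrative examples of the lookup above: with the default table,
# getLevelName(20) == 'INFO' and getLevelName('INFO') == 20 (the table maps
# both directions), while an unregistered value falls through to the
# catch-all, e.g. getLevelName(15) == 'Level 15'.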
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
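# For example (illustrative): _checkLevel(10) and _checkLevel('DEBUG') both
# return 10, _checkLevel('VERBOSE') raises ValueError, and a non-int, non-str
# argument such as _checkLevel(1.5) raises TypeError.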
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if threading:
_lock = threading.RLock()
else: #pragma: no cover
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None, sinfo=None, **kwargs):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warning('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.stack_info = sinfo
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - int(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and threading:
self.thread = threading.get_ident()
self.threadName = threading.current_thread().name
else: # pragma: no cover
self.thread = None
self.threadName = None
if not logMultiprocessing: # pragma: no cover
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except Exception: #pragma: no cover
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
msg = str(self.msg)
if self.args:
msg = msg % self.args
return msg
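# Illustrative sketch of the dict-as-sole-argument behaviour described in
# __init__ (hypothetical values, not stdlib code):
#
#     r = LogRecord('demo', INFO, __file__, 0,
#                   'a %(a)d b %(b)s', ({'a': 1, 'b': 2},), None)
#     r.getMessage()  # -> 'a 1 b 2'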
#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord
def setLogRecordFactory(factory):
"""
Set the factory to be used when instantiating a log record.
:param factory: A callable which will be called to instantiate
a log record.
"""
global _logRecordFactory
_logRecordFactory = factory
def getLogRecordFactory():
"""
Return the factory to be used when instantiating a log record.
"""
return _logRecordFactory
def makeLogRecord(dict):
"""
Make a LogRecord whose attributes are defined by the specified dictionary,
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class PercentStyle(object):
default_format = '%(message)s'
asctime_format = '%(asctime)s'
asctime_search = '%(asctime)'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
def usesTime(self):
return self._fmt.find(self.asctime_search) >= 0
def format(self, record):
return self._fmt % record.__dict__
class StrFormatStyle(PercentStyle):
default_format = '{message}'
asctime_format = '{asctime}'
asctime_search = '{asctime'
def format(self, record):
return self._fmt.format(**record.__dict__)
class StringTemplateStyle(PercentStyle):
default_format = '${message}'
asctime_format = '${asctime}'
asctime_search = '${asctime}'
def __init__(self, fmt):
self._fmt = fmt or self.default_format
self._tpl = Template(self._fmt)
def usesTime(self):
fmt = self._fmt
return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0
def format(self, record):
return self._tpl.substitute(**record.__dict__)
_STYLES = {
'%': PercentStyle,
'{': StrFormatStyle,
'$': StringTemplateStyle
}
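# Illustrative comparison (hypothetical format strings): each style class above
# renders the same record attributes in its own syntax, via .format(record):
#
#     PercentStyle('%(levelname)s:%(message)s')
#     StrFormatStyle('{levelname}:{message}')
#     StringTemplateStyle('${levelname}:${message}')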
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None, style='%'):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
Use a style parameter of '%', '{' or '$' to specify that you want to
use one of %-formatting, :meth:`str.format` (``{}``) formatting or
:class:`string.Template` formatting in your format string.
.. versionchanged: 3.2
Added the ``style`` parameter.
"""
if style not in _STYLES:
raise ValueError('Style must be one of: %s' % ','.join(
_STYLES.keys()))
self._style = _STYLES[style](fmt)
self._fmt = self._style._fmt
self.datefmt = datefmt
default_time_format = '%Y-%m-%d %H:%M:%S'
default_msec_format = '%s,%03d'
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime(self.default_time_format, ct)
s = self.default_msec_format % (t, record.msecs)
return s
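    # Illustrative example (hypothetical format strings): a formatter built as
    # Formatter('%(asctime)s %(message)s', datefmt='%H:%M:%S') renders asctime
    # as e.g. '13:45:30', instead of the '2013-01-01 13:45:30,123' form
    # produced by default_time_format and default_msec_format above.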
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = io.StringIO()
tb = ei[2]
# See issues #9427, #1553375. Commented out for now.
#if getattr(self, 'fullstack', False):
# traceback.print_stack(tb.tb_frame.f_back, file=sio)
traceback.print_exception(ei[0], ei[1], tb, None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._style.usesTime()
def formatMessage(self, record):
return self._style.format(record)
def formatStack(self, stack_info):
"""
This method is provided as an extension point for specialized
formatting of stack information.
The input data is a string as returned from a call to
:func:`traceback.print_stack`, but with the last trailing newline
removed.
The base implementation just returns the value passed in.
"""
return stack_info
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = self.formatMessage(record)
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
s = s + record.exc_text
if record.stack_info:
if s[-1:] != "\n":
s = s + "\n"
s = s + self.formatStack(record.stack_info)
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
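#
# Illustrative sketch (not part of the module; the subclass name is made up):
# a BufferingFormatter subclass that brackets a batch of records with header
# and footer lines. Commented out to keep import-time behaviour unchanged.
#
#   class BatchFormatter(BufferingFormatter):
#       def formatHeader(self, records):
#           return '--- %d record(s) ---\n' % len(records)
#       def formatFooter(self, records):
#           return '--- end of batch ---\n'
#
#   # BatchFormatter().format(records) -> header + one line per record + footer
#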
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
        Is the specified record to be logged? Returns False for no, True for
        yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return True
elif self.name == record.name:
return True
elif record.name.find(self.name, 0, self.nlen) != 0:
return False
return (record.name[self.nlen] == ".")
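#
# Illustrative sketch (not part of the module): hierarchy-based filtering as
# described in the Filter docstring. 'handler' stands for any Handler (or
# Logger) instance.
#
#   f = Filter('A.B')
#   handler.addFilter(f)   # passes 'A.B', 'A.B.C', ... but not 'A.BB'
#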
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
        this and the record is then dropped. Returns False if the record is
        to be dropped, else True.
        .. versionchanged:: 3.2
Allow filters to be just callables.
"""
rv = True
for f in self.filters:
if hasattr(f, 'filter'):
result = f.filter(record)
else:
result = f(record) # assume callable - will raise if not
if not result:
rv = False
break
return rv
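#
# Illustrative sketch (not part of the module; 'some_logger' is a made-up
# name): since Python 3.2, plain callables are accepted wherever a Filter
# is, as the loop above shows.
#
#   def no_secrets(record):
#       return 'password' not in record.getMessage()
#   some_logger.addFilter(no_secrets)
#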
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. If _acquireLock is None, assume this is the case and do
# nothing.
if (_acquireLock is not None and _handlerList is not None and
_releaseLock is not None):
_acquireLock()
try:
if wr in _handlerList:
_handlerList.remove(wr)
finally:
_releaseLock()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
        Create a thread lock for serializing access to the underlying I/O.
"""
if threading:
self.lock = threading.RLock()
else: #pragma: no cover
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler. level must be an int or a str.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError: #pragma: no cover
pass # see issue 5971
finally:
del ei
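#
# Illustrative sketch (not part of the module; the class name is made up):
# a minimal Handler subclass only needs to implement emit(), as the
# NotImplementedError above indicates. Commented out to keep import-time
# behaviour unchanged.
#
#   class ListHandler(Handler):
#       """Collect formatted records in a list (handy in tests)."""
#       def __init__(self, level=NOTSET):
#           Handler.__init__(self, level)
#           self.records = []
#       def emit(self, record):
#           self.records.append(self.format(record))
#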
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
terminator = '\n'
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
stream.write(msg)
stream.write(self.terminator)
self.flush()
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
self.handleError(record)
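#
# Illustrative sketch (not part of the module): StreamHandler defaults to
# sys.stderr; both the stream and the class-level terminator can be changed.
#
#   import sys
#   h = StreamHandler(sys.stdout)
#   h.terminator = ''   # suppress the trailing newline if desired
#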
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
if self.stream:
self.flush()
if hasattr(self.stream, "close"):
self.stream.close()
StreamHandler.close(self)
self.stream = None
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
return open(self.baseFilename, self.mode, encoding=self.encoding)
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
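#
# Illustrative sketch (not part of the module; the filename is made up):
# with delay=True the file is not opened (or created) until the first
# record is emitted, as the emit() override above shows.
#
#   h = FileHandler('app.log', mode='a', encoding='utf-8', delay=True)
#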
class _StderrHandler(StreamHandler):
"""
This class is like a StreamHandler using sys.stderr, but always uses
whatever sys.stderr is currently set to rather than the value of
sys.stderr at handler construction time.
"""
def __init__(self, level=NOTSET):
"""
Initialize the handler.
"""
Handler.__init__(self, level)
@property
def stream(self):
return sys.stderr
_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
if alogger not in self.loggerMap:
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = False
self.loggerDict = {}
self.loggerClass = None
self.logRecordFactory = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, str):
raise TypeError('A logger name must be a string')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def setLogRecordFactory(self, factory):
"""
Set the factory to be used when instantiating a log record with this
Manager.
"""
self.logRecordFactory = factory
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(name)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = True
self.handlers = []
self.disabled = False
def setLevel(self, level):
"""
Set the logging level of this logger. level must be an int or a str.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = True
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, int):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
sinfo)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
sinfo = None
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func, sinfo = self.findCaller(stack_info)
except ValueError: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else: # pragma: no cover
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args,
exc_info, func, extra, sinfo)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def hasHandlers(self):
"""
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the
logger hierarchy. Return True if a handler was found, else False.
Stop searching up the hierarchy whenever a logger with the "propagate"
attribute set to zero is found - that will be the last logger which
is checked for the existence of handlers.
"""
c = self
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0):
if lastResort:
if record.levelno >= lastResort.level:
lastResort.handle(record)
elif raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = True
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
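#
# Illustrative sketch (not part of the module): getChild() resolves names
# relative to an existing logger, which is convenient when the parent was
# named via __name__.
#
#   parent = logging.getLogger('pkg.mod')
#   child = parent.getChild('worker.io')  # same as getLogger('pkg.mod.worker.io')
#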
class RootLogger(Logger):
"""
    A root logger is not that different from any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
#
# Boilerplate convenience methods
#
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger.
"""
self.log(DEBUG, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger.
"""
self.log(INFO, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger.
"""
self.log(WARNING, msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
warnings.warn("The 'warn' method is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
self.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger.
"""
self.log(ERROR, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger.
"""
kwargs["exc_info"] = True
self.log(ERROR, msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger.
"""
self.log(CRITICAL, msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
if self.isEnabledFor(level):
msg, kwargs = self.process(msg, kwargs)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def setLevel(self, level):
"""
Set the specified level on the underlying logger.
"""
self.logger.setLevel(level)
def getEffectiveLevel(self):
"""
Get the effective level for the underlying logger.
"""
return self.logger.getEffectiveLevel()
def hasHandlers(self):
"""
See if the underlying logger has any handlers.
"""
return self.logger.hasHandlers()
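#
# Illustrative sketch (not part of the module; names are made up): because
# process() injects self.extra via the 'extra' keyword, the dict's keys
# become LogRecord attributes usable in a format string.
#
#   logger = logging.getLogger('net')
#   adapter = LoggerAdapter(logger, {'connid': 'abc123'})
#   # with format '%(connid)s %(message)s':
#   adapter.warning('connection reset')   # -> 'abc123 connection reset'
#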
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
style If a format string is specified, use this to specify the
type of format string (possible values '%', '{', '$', for
%-formatting, :meth:`str.format` and :class:`string.Template`
- defaults to '%').
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
handlers If specified, this should be an iterable of already created
handlers, which will be added to the root handler. Any handler
in the list which does not have a formatter assigned will be
assigned the formatter created in this function.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
.. versionchanged:: 3.2
Added the ``style`` parameter.
.. versionchanged:: 3.3
        Added the ``handlers`` parameter. A ``ValueError`` is now raised for
        incompatible arguments (e.g. ``handlers`` specified together with
        ``filename``/``filemode``, or ``filename``/``filemode`` specified
        together with ``stream``, or ``handlers`` specified together with
        ``stream``).
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
handlers = kwargs.get("handlers")
if handlers is None:
if "stream" in kwargs and "filename" in kwargs:
raise ValueError("'stream' and 'filename' should not be "
"specified together")
else:
if "stream" in kwargs or "filename" in kwargs:
raise ValueError("'stream' or 'filename' should not be "
"specified together with 'handlers'")
if handlers is None:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
h = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
h = StreamHandler(stream)
handlers = [h]
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
style = kwargs.get("style", '%')
fmt = Formatter(fs, dfs, style)
for h in handlers:
if h.formatter is None:
h.setFormatter(fmt)
root.addHandler(h)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
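#
# Illustrative sketch (not part of the module; the filename is made up):
# one-shot configuration with basicConfig(). Because the root logger then
# has a handler, later basicConfig() calls are no-ops.
#
#   import logging
#   logging.basicConfig(filename='app.log', filemode='w',
#                       level=logging.DEBUG,
#                       format='%(asctime)s %(levelname)s %(message)s')
#   logging.debug('configured')
#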
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger. If the logger
has no handlers, call basicConfig() to add a console handler with a
pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger, with exception
information. If the logger has no handlers, basicConfig() is called to add
a console handler with a pre-defined format.
"""
kwargs['exc_info'] = True
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
warnings.warn("The 'warn' function is deprecated, "
"use 'warning' instead", DeprecationWarning, 2)
warning(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger. If the logger has
no handlers, call basicConfig() to add a console handler with a pre-defined
format.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger. If
the logger has no handlers, call basicConfig() to add a console handler
with a pre-defined format.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
"""Stub."""
def emit(self, record):
"""Stub."""
def createLock(self):
self.lock = None
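#
# Illustrative sketch (not part of the module; 'mylib' is a made-up package
# name): the library pattern described in the NullHandler docstring.
#
#   # in a library's top-level __init__.py:
#   import logging
#   logging.getLogger('mylib').addHandler(logging.NullHandler())
#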
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging. It will first
    check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
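#
# Illustrative sketch (not part of the module): routing warnings through
# logging and back again.
#
#   import logging, warnings
#   logging.captureWarnings(True)
#   warnings.warn('deprecated API')    # logged via the 'py.warnings' logger
#   logging.captureWarnings(False)     # restores the original showwarning
#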
|
hgl888/blink-crosswalk-efl | refs/heads/efl/crosswalk-10/39.0.2171.19 | Source/bindings/scripts/code_generator_v8.py | 9 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate Blink V8 bindings (.h and .cpp files).
If run itself, caches Jinja templates (and creates dummy file for build,
since cache filenames are unpredictable and opaque).
This module is *not* concurrency-safe without care: bytecode caching creates
a race condition on cache *write* (crashes if one process tries to read a
partially-written cache). However, if you pre-cache the templates (by running
the module itself), then you can parallelize compiling individual files, since
cache *reading* is safe.
Input: An object of class IdlDefinitions, containing an IDL interface X
Output: V8X.h and V8X.cpp
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import os
import posixpath
import re
import sys
# Path handling for libraries and templates
# Paths have to be normalized because Jinja uses the exact template path to
# determine the hash used in the cache filename, and we need a pre-caching step
# to be concurrency-safe. Use absolute path because __file__ is absolute if
# module is imported, and relative if executed directly.
# If paths differ between pre-caching and individual file compilation, the cache
# is regenerated, which causes a race condition and breaks concurrent build,
# since some compile processes will try to read the partially written cache.
module_path, module_filename = os.path.split(os.path.realpath(__file__))
third_party_dir = os.path.normpath(os.path.join(
module_path, os.pardir, os.pardir, os.pardir, os.pardir))
templates_dir = os.path.normpath(os.path.join(
module_path, os.pardir, 'templates'))
# Make sure extension is .py, not .pyc or .pyo, so doesn't depend on caching
module_pyname = os.path.splitext(module_filename)[0] + '.py'
# jinja2 is in chromium's third_party directory.
# Insert at 1 so at front to override system libraries, and
# after path[0] == invoking script dir
sys.path.insert(1, third_party_dir)
import jinja2
import idl_types
from idl_types import IdlType
import v8_callback_interface
import v8_dictionary
from v8_globals import includes, interfaces
import v8_interface
import v8_types
from v8_utilities import capitalize, cpp_name, conditional_string, v8_class_name
from utilities import KNOWN_COMPONENTS
def render_template(interface_info, header_template, cpp_template,
template_context):
template_context['code_generator'] = module_pyname
# Add includes for any dependencies
template_context['header_includes'] = sorted(
template_context['header_includes'])
includes.update(interface_info.get('dependencies_include_paths', []))
template_context['cpp_includes'] = sorted(includes)
header_text = header_template.render(template_context)
cpp_text = cpp_template.render(template_context)
return header_text, cpp_text
class CodeGeneratorBase(object):
"""Base class for v8 bindings generator and IDL dictionary impl generator"""
def __init__(self, interfaces_info, cache_dir, output_dir):
interfaces_info = interfaces_info or {}
self.interfaces_info = interfaces_info
self.jinja_env = initialize_jinja_env(cache_dir)
self.output_dir = output_dir
# Set global type info
idl_types.set_ancestors(interfaces_info['ancestors'])
IdlType.set_callback_interfaces(interfaces_info['callback_interfaces'])
IdlType.set_dictionaries(interfaces_info['dictionaries'])
IdlType.set_implemented_as_interfaces(interfaces_info['implemented_as_interfaces'])
IdlType.set_garbage_collected_types(interfaces_info['garbage_collected_interfaces'])
IdlType.set_will_be_garbage_collected_types(interfaces_info['will_be_garbage_collected_interfaces'])
v8_types.set_component_dirs(interfaces_info['component_dirs'])
def generate_code(self, definitions, definition_name):
"""Returns .h/.cpp code as ((path, content)...)."""
# Set local type info
IdlType.set_callback_functions(definitions.callback_functions.keys())
IdlType.set_enums((enum.name, enum.values)
for enum in definitions.enumerations.values())
return self.generate_code_internal(definitions, definition_name)
def generate_code_internal(self, definitions, definition_name):
# This should be implemented in subclasses.
raise NotImplementedError()
class CodeGeneratorV8(CodeGeneratorBase):
def __init__(self, interfaces_info, cache_dir, output_dir):
CodeGeneratorBase.__init__(self, interfaces_info, cache_dir, output_dir)
def output_paths(self, definition_name):
header_path = posixpath.join(self.output_dir,
'V8%s.h' % definition_name)
cpp_path = posixpath.join(self.output_dir, 'V8%s.cpp' % definition_name)
return header_path, cpp_path
def generate_code_internal(self, definitions, definition_name):
if definition_name in definitions.interfaces:
return self.generate_interface_code(
definitions, definition_name,
definitions.interfaces[definition_name])
if definition_name in definitions.dictionaries:
return self.generate_dictionary_code(
definitions, definition_name,
definitions.dictionaries[definition_name])
raise ValueError('%s is not in IDL definitions' % definition_name)
def generate_interface_code(self, definitions, interface_name, interface):
# Store other interfaces for introspection
interfaces.update(definitions.interfaces)
# Select appropriate Jinja template and contents function
if interface.is_callback:
header_template_filename = 'callback_interface.h'
cpp_template_filename = 'callback_interface.cpp'
interface_context = v8_callback_interface.callback_interface_context
else:
header_template_filename = 'interface.h'
cpp_template_filename = 'interface.cpp'
interface_context = v8_interface.interface_context
header_template = self.jinja_env.get_template(header_template_filename)
cpp_template = self.jinja_env.get_template(cpp_template_filename)
interface_info = self.interfaces_info[interface_name]
template_context = interface_context(interface)
# Add the include for interface itself
template_context['header_includes'].add(interface_info['include_path'])
header_text, cpp_text = render_template(
interface_info, header_template, cpp_template, template_context)
header_path, cpp_path = self.output_paths(interface_name)
return (
(header_path, header_text),
(cpp_path, cpp_text),
)
def generate_dictionary_code(self, definitions, dictionary_name,
dictionary):
header_template = self.jinja_env.get_template('dictionary_v8.h')
cpp_template = self.jinja_env.get_template('dictionary_v8.cpp')
template_context = v8_dictionary.dictionary_context(dictionary)
interface_info = self.interfaces_info[dictionary_name]
# Add the include for interface itself
template_context['header_includes'].add(interface_info['include_path'])
header_text, cpp_text = render_template(
interface_info, header_template, cpp_template, template_context)
header_path, cpp_path = self.output_paths(dictionary_name)
return (
(header_path, header_text),
(cpp_path, cpp_text),
)
class CodeGeneratorDictionaryImpl(CodeGeneratorBase):
def __init__(self, interfaces_info, cache_dir, output_dir):
CodeGeneratorBase.__init__(self, interfaces_info, cache_dir, output_dir)
def output_paths(self, definition_name, interface_info):
output_dir = posixpath.join(self.output_dir,
interface_info['relative_dir'])
header_path = posixpath.join(output_dir, '%s.h' % definition_name)
cpp_path = posixpath.join(output_dir, '%s.cpp' % definition_name)
return header_path, cpp_path
def generate_code_internal(self, definitions, definition_name):
        if definition_name not in definitions.dictionaries:
            raise ValueError('%s is not an IDL dictionary' % definition_name)
dictionary = definitions.dictionaries[definition_name]
interface_info = self.interfaces_info[definition_name]
header_template = self.jinja_env.get_template('dictionary_impl.h')
cpp_template = self.jinja_env.get_template('dictionary_impl.cpp')
template_context = v8_dictionary.dictionary_impl_context(
dictionary, self.interfaces_info)
header_text, cpp_text = render_template(
interface_info, header_template, cpp_template, template_context)
header_path, cpp_path = self.output_paths(
definition_name, interface_info)
return (
(header_path, header_text),
(cpp_path, cpp_text),
)
def initialize_jinja_env(cache_dir):
jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(templates_dir),
# Bytecode cache is not concurrency-safe unless pre-cached:
# if pre-cached this is read-only, but writing creates a race condition.
bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
keep_trailing_newline=True, # newline-terminate generated files
lstrip_blocks=True, # so can indent control flow tags
trim_blocks=True)
jinja_env.filters.update({
'blink_capitalize': capitalize,
'conditional': conditional_if_endif,
'exposed': exposed_if,
'per_context_enabled': per_context_enabled_if,
'runtime_enabled': runtime_enabled_if,
})
return jinja_env
def generate_indented_conditional(code, conditional):
# Indent if statement to level of original code
indent = re.match(' *', code).group(0)
return ('%sif (%s) {\n' % (indent, conditional) +
' %s\n' % '\n '.join(code.splitlines()) +
'%s}\n' % indent)
# [Conditional]
def conditional_if_endif(code, conditional_string):
# Jinja2 filter to generate if/endif directive blocks
if not conditional_string:
return code
return ('#if %s\n' % conditional_string +
code +
'#endif // %s\n' % conditional_string)
# [Exposed]
def exposed_if(code, exposed_test):
if not exposed_test:
return code
return generate_indented_conditional(code, 'context && (%s)' % exposed_test)
# [PerContextEnabled]
def per_context_enabled_if(code, per_context_enabled_function):
if not per_context_enabled_function:
return code
return generate_indented_conditional(code, 'context && context->isDocument() && %s(toDocument(context))' % per_context_enabled_function)
# [RuntimeEnabled]
def runtime_enabled_if(code, runtime_enabled_function_name):
if not runtime_enabled_function_name:
return code
return generate_indented_conditional(code, '%s()' % runtime_enabled_function_name)
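#
# Illustrative sketch (not part of the generator; the feature name is made
# up): given the filters above, a template fragment tagged with
# [RuntimeEnabled] is wrapped in an if block at its original indentation,
# roughly:
#
#   runtime_enabled_if('    return true;', 'RuntimeEnabledFeatures::fooEnabled')
#   # ->
#   #     if (RuntimeEnabledFeatures::fooEnabled()) {
#   #         return true;
#   #     }
#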
################################################################################
def main(argv):
# If file itself executed, cache templates
try:
cache_dir = argv[1]
dummy_filename = argv[2]
    except IndexError:
        print 'Usage: %s CACHE_DIR DUMMY_FILENAME' % argv[0]
return 1
# Cache templates
jinja_env = initialize_jinja_env(cache_dir)
template_filenames = [filename for filename in os.listdir(templates_dir)
# Skip .svn, directories, etc.
if filename.endswith(('.cpp', '.h'))]
for template_filename in template_filenames:
jinja_env.get_template(template_filename)
# Create a dummy file as output for the build system,
# since filenames of individual cache files are unpredictable and opaque
# (they are hashes of the template path, which varies based on environment)
with open(dummy_filename, 'w') as dummy_file:
pass # |open| creates or touches the file
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
ales-erjavec/scipy | refs/heads/master | scipy/optimize/tests/test__root.py | 127 | """
Unit tests for optimization routines from _root.py.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_
import numpy as np
from scipy.optimize import root
class TestRoot(object):
def test_tol_parameter(self):
        # Check that the root() tol= argument does something
def func(z):
x, y = z
return np.array([x**3 - 1, y**3 - 1])
def dfunc(z):
x, y = z
return np.array([[3*x**2, 0], [0, 3*y**2]])
for method in ['hybr', 'lm', 'broyden1', 'broyden2', 'anderson',
'diagbroyden', 'krylov']:
if method in ('linearmixing', 'excitingmixing'):
# doesn't converge
continue
if method in ('hybr', 'lm'):
jac = dfunc
else:
jac = None
sol1 = root(func, [1.1,1.1], jac=jac, tol=1e-4, method=method)
sol2 = root(func, [1.1,1.1], jac=jac, tol=0.5, method=method)
msg = "%s: %s vs. %s" % (method, func(sol1.x), func(sol2.x))
assert_(sol1.success, msg)
assert_(sol2.success, msg)
assert_(abs(func(sol1.x)).max() < abs(func(sol2.x)).max(),
msg)
def test_minimize_scalar_coerce_args_param(self):
# github issue #3503
def func(z, f=1):
x, y = z
return np.array([x**3 - 1, y**3 - f])
root(func, [1.1, 1.1], args=1.5)
|
houseurmusic/my-swift | refs/heads/master | swift/__init__.py | 4 | import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.4', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
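#
# Illustrative sketch (not part of the module): pretty_version carries a
# '-dev' suffix until the release is marked final.
#
#   Version('1.4.4', False).pretty_version   # -> '1.4.4-dev'
#   Version('1.4.4', True).pretty_version    # -> '1.4.4'
#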
|
abinashk-inf/AstroBox | refs/heads/master | src/astroprint/__init__.py | 5 | # coding=utf-8
__author__ = "Daniel Arroyo <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' |