repo_name | path | copies | size | content | license
---|---|---|---|---|---|
alsrgv/tensorflow | tensorflow/contrib/learn/python/learn/utils/export.py | 28 | 13975 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_first_op_from_collection(collection_name):
"""Get first element from the collection."""
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is not None:
if saver:
saver = saver[0]
else:
saver = None
if saver is None and variables.global_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_graph(graph, saver, checkpoint_path, export_dir,
default_graph_signature, named_graph_signatures,
exports_to_keep):
"""Exports graph via session_bundle, by creating a Session."""
with graph.as_default():
with tf_session.Session('') as session:
variables.local_variables_initializer()
lookup_ops.tables_initializer()
saver.restore(session, checkpoint_path)
export = exporter.Exporter(saver)
export.init(
init_op=control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer()),
default_graph_signature=default_graph_signature,
named_graph_signatures=named_graph_signatures,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
return export.export(export_dir, training_util.get_global_step(),
session, exports_to_keep=exports_to_keep)
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def generic_signature_fn(examples, unused_features, predictions):
"""Creates generic signature from given examples and predictions.
This is needed for backward compatibility with default behavior of
export_estimator.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or `dict` of `Tensor`s.
Returns:
Tuple of default signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
tensors = {'inputs': examples}
if not isinstance(predictions, dict):
predictions = {'outputs': predictions}
tensors.update(predictions)
default_signature = exporter.generic_signature(tensors)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` or dict of tensors that contains the classes tensor
as in {'classes': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions['classes'])
else:
default_signature = exporter.classification_signature(
examples, classes_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn_with_prob(
examples, unused_features, predictions):
"""Classification signature from given examples and predicted probabilities.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of predicted probabilities or dict that contains the
probabilities tensor as in {'probabilities': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions['probabilities'])
else:
default_signature = exporter.classification_signature(
examples, scores_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def regression_signature_fn(examples, unused_features, predictions):
"""Creates regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor`.
Returns:
Tuple of default regression signature and empty named signatures.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=predictions)
return default_signature, {}
@deprecated('2017-03-25',
'signature_fns are deprecated. For canned Estimators they are no '
'longer needed. For custom Estimators, please return '
'output_alternatives from your model_fn via ModelFnOps.')
def logistic_regression_signature_fn(examples, unused_features, predictions):
"""Creates logistic regression signature from given examples and predictions.
Args:
examples: `Tensor`.
unused_features: `dict` of `Tensor`s.
predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
dict that contains the probabilities tensor as in
{'probabilities': `Tensor`}.
Returns:
Tuple of default regression signature and named signature.
Raises:
ValueError: If examples is `None`.
"""
if examples is None:
raise ValueError('examples cannot be None when using this signature fn.')
if isinstance(predictions, dict):
predictions_tensor = predictions['probabilities']
else:
predictions_tensor = predictions
# predictions should have shape [batch_size, 2] where first column is P(Y=0|x)
# while second column is P(Y=1|x). We are only interested in the second
# column for inference.
predictions_shape = predictions_tensor.get_shape()
predictions_rank = len(predictions_shape)
if predictions_rank != 2:
logging.fatal(
'Expected predictions to have rank 2, but received predictions with '
'rank: {} and shape: {}'.format(predictions_rank, predictions_shape))
if predictions_shape[1] != 2:
logging.fatal(
'Expected predictions to have 2nd dimension: 2, but received '
'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
'regression_signature_fn or classification_signature_fn_with_prob '
'instead?'.format(predictions_shape[1], predictions_shape))
positive_predictions = predictions_tensor[:, 1]
default_signature = exporter.regression_signature(
input_tensor=examples, output_tensor=positive_predictions)
return default_signature, {}
# pylint: disable=protected-access
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _default_input_fn(estimator, examples):
"""Creates default input parsing using Estimator's feature signatures."""
return estimator._get_feature_ops_from_example(examples)
@deprecated('2016-09-23', 'Please use Estimator.export_savedmodel() instead.')
def export_estimator(estimator,
export_dir,
signature_fn=None,
input_fn=_default_input_fn,
default_batch_size=1,
exports_to_keep=None):
"""Deprecated, please use Estimator.export_savedmodel()."""
_export_estimator(estimator=estimator,
export_dir=export_dir,
signature_fn=signature_fn,
input_fn=input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_estimator(estimator,
export_dir,
signature_fn,
input_fn,
default_batch_size,
exports_to_keep,
input_feature_key=None,
use_deprecated_input_fn=True,
prediction_key=None,
checkpoint_path=None):
if use_deprecated_input_fn:
input_fn = input_fn or _default_input_fn
elif input_fn is None:
raise ValueError('input_fn must be defined.')
# If checkpoint_path is specified, use the specified checkpoint path.
checkpoint_path = (checkpoint_path or
checkpoint_management.latest_checkpoint(
estimator._model_dir))
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
if use_deprecated_input_fn:
examples = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
features = input_fn(estimator, examples)
else:
features, _ = input_fn()
examples = None
if input_feature_key is not None:
examples = features.pop(input_feature_key)
if (not features) and (examples is None):
raise ValueError('Either features or examples must be defined.')
predictions = estimator._get_predict_ops(features).predictions
if prediction_key is not None:
predictions = predictions[prediction_key]
# Explicit signature_fn takes priority
if signature_fn:
default_signature, named_graph_signatures = signature_fn(examples,
features,
predictions)
else:
try:
# Some estimators provide a signature function.
# TODO(zakaria): check if the estimator has this function,
# raise helpful error if not
signature_fn = estimator._create_signature_fn()
default_signature, named_graph_signatures = (
signature_fn(examples, features, predictions))
except AttributeError:
logging.warn(
'Change warning: `signature_fn` will be required after'
'2016-08-01.\n'
'Using generic signatures for now. To maintain this behavior, '
'pass:\n'
' signature_fn=export.generic_signature_fn\n'
'Also consider passing a regression or classification signature; '
'see cl/126430915 for an example.')
default_signature, named_graph_signatures = generic_signature_fn(
examples, features, predictions)
if exports_to_keep is not None:
exports_to_keep = gc.largest_export_versions(exports_to_keep)
return _export_graph(
g,
_get_saver(),
checkpoint_path,
export_dir,
default_graph_signature=default_signature,
named_graph_signatures=named_graph_signatures,
exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
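# Illustrative usage sketch (not part of the original module): how this
# deprecated API was typically invoked before Estimator.export_savedmodel().
# `my_estimator` and the export path are hypothetical placeholders.
#
#   from tensorflow.contrib.learn.python.learn.utils import export
#   export.export_estimator(my_estimator,
#                           export_dir='/tmp/my_model_export',
#                           signature_fn=export.generic_signature_fn)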
| apache-2.0 |
hauxir/OpenBazaar-Server | daemon.py | 4 | 3692 | __author__ = 'chris'
import sys, os, time, atexit
from signal import SIGTERM
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
# pylint: disable=file-builtin
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile, 'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args)
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self, *args):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| mit |
kernsuite-debian/lofar | LCU/StationTest/test/hbatest/determinepeak.py | 1 | 4055 | """ script for determining the peak in the spectrum
Andre 10 July 2009
Usage python3 ./determinepeak.py [# of RCUs]
"""
# INIT
import array
import operator
import os
import time
import sys
import math
import numpy
# Read directory with the files to process
def open_dir(dirname) :
files = list(filter(os.path.isfile, os.listdir('.')))
# files.sort(key=lambda x: os.path.getmtime(x))
return files
def rm_files(dir_name, file) :
cmdstr = 'rm ' + file
os.popen3(cmdstr)
return
def rec_stat(dirname, num_rcu) :
os.popen("rspctl --statistics --duration=10 --integration=10 --select=0:" + str(num_rcu - 1) + " 2>/dev/null")
return
# Open file for processing
def open_file(files, file_nr) :
# check if file is data file, no junk
if files[file_nr][-3:] == 'dat':
file_name = files[file_nr]
fileinfo = os.stat(file_name)
size = int(fileinfo.st_size)
f = open(file_name, 'rb')
max_frames = size / (512 * 8)
frames_to_process = max_frames
rcu_nr = int(files[file_nr][-6:-4])
# print 'File nr ' + str(file_nr) + ' RCU nr ' + str(rcu_nr) + ' ' + files[file_nr][-6:-4]
else :
frames_to_process = 0
f = open(files[file_nr], 'rb')
rcu_nr = 0
return f, frames_to_process, rcu_nr
# Read single frame from file
def read_frame(f):
sst_data = array.array('d')
sst_data.fromfile(f, 512)
sst_data = sst_data.tolist()
return sst_data
# switch on HBA tiles gentle
def switchon_hba() :
try:
os.popen3("rspctl --rcumode=5 --sel=0:31")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=32:63")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=64:95")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=96:127")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=128:159")
time.sleep(1)
os.popen3("rspctl --rcumode=5 --sel=160:191")
time.sleep(1)
except:
print("NL station")
os.popen("rspctl --rcuenable=1")
return
# Main loop
def main() :
sub_time = []
sub_file = []
dir_name = './hbadatatest/' # Work directory will be cleaned
if not(os.path.exists(dir_name)):
os.mkdir(dir_name)
rmfile = '*.log'
hba_elements = 16
sleeptime = 1
ctrl_string = '='
# read in arguments
if len(sys.argv) < 2 :
num_rcu = 96
else :
num_rcu = int(sys.argv[1])
print(' Number of RCUs is ' + str(num_rcu))
max_subband = list(range(0, num_rcu))
max_rfi = list(range(0, num_rcu))
os.chdir(dir_name)
# os.popen("rspctl --clock=200")
# print 'Clock is set to 200 MHz'
# time.sleep(10)
#---------------------------------------------
# capture reference data (all HBA elements off)
rm_files(dir_name, '*')
switchon_hba()
# os.popen("rspctl --rcumode=5 2>/dev/null")
# os.popen("rspctl --rcuenable=1 2>/dev/null")
for ind in range(hba_elements) :
ctrl_string = ctrl_string + '128,'
strlength = len(ctrl_string)
ctrl_string = ctrl_string[0:strlength - 1]
cmd_str = 'rspctl --hbadelay' + ctrl_string + ' 2>/dev/null'
os.popen(cmd_str)
print('Setting all HBA elements on (128)')
time.sleep(sleeptime)
print('Capture data')
rec_stat(dir_name, num_rcu)
# rm_files(dir_name,rmfile)
# get list of all files in dir_name
files = open_dir(dir_name)
# start searching for maxima for each RCU
for file_cnt in range(len(files)) :
f, frames_to_process, rcu_nr = open_file(files, file_cnt)
if frames_to_process > 0 :
sst_data = read_frame(f)
[maxval, subband_nr] = max((x, i) for i, x in enumerate(sst_data[1:]))
max_rfi[rcu_nr] = 10 * numpy.log10(maxval)
max_subband[rcu_nr] = subband_nr + 1
f.close()
for rcuind in range(num_rcu) :
print('RCU ' + str(rcuind) + ' has max. RFI (' + str(round(max_rfi[rcuind], 1)) + ' dB) in subband ' + str(max_subband[rcuind]))
main()
| gpl-3.0 |
lixiangning888/whole_project | modules/signatures_merge_tmp/rat_pcclient.py | 3 | 1675 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 @threatlead
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class PcClientMutexes(Signature):
name = "rat_pcclient"
description = "Creates mutexes or file modifications commonly associated with PcClient"
severity = 3
categories = ["rat"]
families = ["pcclient", "nex"]
authors = ["threatlead"]
references = ["https://malwr.com/analysis/MDIxN2NhMjg4MTg2NDY4MWIyNTE0Zjk5MTY1OGU4YzE/"]
minimum = "0.5"
def run(self):
indicators = [
"BKLANG.*",
"VSLANG.*",
]
for indicator in indicators:
if self.check_mutex(pattern=indicator, regex=True):
return True
indicators = [
".*\\\\syslog.dat",
".*\\\\.*_lang.ini",
".*\\\\[0-9]+_lang.dll",
".*\\\\[0-9]+_res.tmp",
]
for indicator in indicators:
if self.check_file(pattern=indicator, regex=True):
return True
return False
| lgpl-3.0 |
40223145c2g18/c2g18 | wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/dom/expatbuilder.py | 733 | 35733 | """Facility to use the Expat parser to load a minidom instance
from a string or file.
This avoids all the overhead of SAX and pulldom to gain performance.
"""
# Warning!
#
# This module is tightly bound to the implementation details of the
# minidom DOM and can't be used with other DOM implementations. This
# is due, in part, to a lack of appropriate methods in the DOM (there is
# no way to create Entity and Notation nodes via the DOM Level 2
# interface), and for performance. The latter is the cause of some fairly
# cryptic code.
#
# Performance hacks:
#
# - .character_data_handler() has an extra case in which continuing
# data is appended to an existing Text node; this can be a
# speedup since pyexpat can break up character data into multiple
# callbacks even though we set the buffer_text attribute on the
# parser. This also gives us the advantage that we don't need a
# separate normalization pass.
#
# - Determining that a node exists is done using an identity comparison
# with None rather than a truth test; this avoids searching for and
# calling any methods on the node object if it exists. (A rather
# nice speedup is achieved this way as well!)
from xml.dom import xmlbuilder, minidom, Node
from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE
from xml.parsers import expat
from xml.dom.minidom import _append_child, _set_attribute_node
from xml.dom.NodeFilter import NodeFilter
TEXT_NODE = Node.TEXT_NODE
CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE
DOCUMENT_NODE = Node.DOCUMENT_NODE
FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT
FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP
FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT
theDOMImplementation = minidom.getDOMImplementation()
# Expat typename -> TypeInfo
_typeinfo_map = {
"CDATA": minidom.TypeInfo(None, "cdata"),
"ENUM": minidom.TypeInfo(None, "enumeration"),
"ENTITY": minidom.TypeInfo(None, "entity"),
"ENTITIES": minidom.TypeInfo(None, "entities"),
"ID": minidom.TypeInfo(None, "id"),
"IDREF": minidom.TypeInfo(None, "idref"),
"IDREFS": minidom.TypeInfo(None, "idrefs"),
"NMTOKEN": minidom.TypeInfo(None, "nmtoken"),
"NMTOKENS": minidom.TypeInfo(None, "nmtokens"),
}
class ElementInfo(object):
__slots__ = '_attr_info', '_model', 'tagName'
def __init__(self, tagName, model=None):
self.tagName = tagName
self._attr_info = []
self._model = model
def __getstate__(self):
return self._attr_info, self._model, self.tagName
def __setstate__(self, state):
self._attr_info, self._model, self.tagName = state
def getAttributeType(self, aname):
for info in self._attr_info:
if info[1] == aname:
t = info[-2]
if t[0] == "(":
return _typeinfo_map["ENUM"]
else:
return _typeinfo_map[info[-2]]
return minidom._no_type
def getAttributeTypeNS(self, namespaceURI, localName):
return minidom._no_type
def isElementContent(self):
if self._model:
type = self._model[0]
return type not in (expat.model.XML_CTYPE_ANY,
expat.model.XML_CTYPE_MIXED)
else:
return False
def isEmpty(self):
if self._model:
return self._model[0] == expat.model.XML_CTYPE_EMPTY
else:
return False
def isId(self, aname):
for info in self._attr_info:
if info[1] == aname:
return info[-2] == "ID"
return False
def isIdNS(self, euri, ename, auri, aname):
# not sure this is meaningful
return self.isId((auri, aname))
def _intern(builder, s):
return builder._intern_setdefault(s, s)
def _parse_ns_name(builder, name):
assert ' ' in name
parts = name.split(' ')
intern = builder._intern_setdefault
if len(parts) == 3:
uri, localname, prefix = parts
prefix = intern(prefix, prefix)
qname = "%s:%s" % (prefix, localname)
qname = intern(qname, qname)
localname = intern(localname, localname)
else:
uri, localname = parts
prefix = EMPTY_PREFIX
qname = localname = intern(localname, localname)
return intern(uri, uri), localname, prefix, qname
class ExpatBuilder:
"""Document builder that uses Expat to build a ParsedXML.DOM document
instance."""
def __init__(self, options=None):
if options is None:
options = xmlbuilder.Options()
self._options = options
if self._options.filter is not None:
self._filter = FilterVisibilityController(self._options.filter)
else:
self._filter = None
# This *really* doesn't do anything in this case, so
# override it with something fast & minimal.
self._finish_start_element = id
self._parser = None
self.reset()
def createParser(self):
"""Create a new parser object."""
return expat.ParserCreate()
def getParser(self):
"""Return the parser object, creating a new one if needed."""
if not self._parser:
self._parser = self.createParser()
self._intern_setdefault = self._parser.intern.setdefault
self._parser.buffer_text = True
self._parser.ordered_attributes = True
self._parser.specified_attributes = True
self.install(self._parser)
return self._parser
def reset(self):
"""Free all data structures used during DOM construction."""
self.document = theDOMImplementation.createDocument(
EMPTY_NAMESPACE, None, None)
self.curNode = self.document
self._elem_info = self.document._elem_info
self._cdata = False
def install(self, parser):
"""Install the callbacks needed to build the DOM into the parser."""
# This creates circular references!
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.first_element_handler
parser.EndElementHandler = self.end_element_handler
parser.ProcessingInstructionHandler = self.pi_handler
if self._options.entities:
parser.EntityDeclHandler = self.entity_decl_handler
parser.NotationDeclHandler = self.notation_decl_handler
if self._options.comments:
parser.CommentHandler = self.comment_handler
if self._options.cdata_sections:
parser.StartCdataSectionHandler = self.start_cdata_section_handler
parser.EndCdataSectionHandler = self.end_cdata_section_handler
parser.CharacterDataHandler = self.character_data_handler_cdata
else:
parser.CharacterDataHandler = self.character_data_handler
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
parser.XmlDeclHandler = self.xml_decl_handler
parser.ElementDeclHandler = self.element_decl_handler
parser.AttlistDeclHandler = self.attlist_decl_handler
def parseFile(self, file):
"""Parse a document from a file object, returning the document
node."""
parser = self.getParser()
first_buffer = True
try:
while 1:
buffer = file.read(16*1024)
if not buffer:
break
parser.Parse(buffer, 0)
if first_buffer and self.document.documentElement:
self._setup_subset(buffer)
first_buffer = False
parser.Parse("", True)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def parseString(self, string):
"""Parse a document from a string, returning the document node."""
parser = self.getParser()
try:
parser.Parse(string, True)
self._setup_subset(string)
except ParseEscape:
pass
doc = self.document
self.reset()
self._parser = None
return doc
def _setup_subset(self, buffer):
"""Load the internal subset if there might be one."""
if self.document.doctype:
extractor = InternalSubsetExtractor()
extractor.parseString(buffer)
subset = extractor.getSubset()
self.document.doctype.internalSubset = subset
def start_doctype_decl_handler(self, doctypeName, systemId, publicId,
has_internal_subset):
doctype = self.document.implementation.createDocumentType(
doctypeName, publicId, systemId)
doctype.ownerDocument = self.document
_append_child(self.document, doctype)
self.document.doctype = doctype
if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT:
self.document.doctype = None
del self.document.childNodes[-1]
doctype = None
self._parser.EntityDeclHandler = None
self._parser.NotationDeclHandler = None
if has_internal_subset:
if doctype is not None:
doctype.entities._seq = []
doctype.notations._seq = []
self._parser.CommentHandler = None
self._parser.ProcessingInstructionHandler = None
self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
def end_doctype_decl_handler(self):
if self._options.comments:
self._parser.CommentHandler = self.comment_handler
self._parser.ProcessingInstructionHandler = self.pi_handler
if not (self._elem_info or self._filter):
self._finish_end_element = id
def pi_handler(self, target, data):
node = self.document.createProcessingInstruction(target, data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def character_data_handler_cdata(self, data):
childNodes = self.curNode.childNodes
if self._cdata:
if ( self._cdata_continue
and childNodes[-1].nodeType == CDATA_SECTION_NODE):
childNodes[-1].appendData(data)
return
node = self.document.createCDATASection(data)
self._cdata_continue = True
elif childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
value = node.data + data
node.data = value
return
else:
node = minidom.Text()
node.data = data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def character_data_handler(self, data):
childNodes = self.curNode.childNodes
if childNodes and childNodes[-1].nodeType == TEXT_NODE:
node = childNodes[-1]
node.data = node.data + data
return
node = minidom.Text()
node.data = node.data + data
node.ownerDocument = self.document
_append_child(self.curNode, node)
def entity_decl_handler(self, entityName, is_parameter_entity, value,
base, systemId, publicId, notationName):
if is_parameter_entity:
# we don't care about parameter entities for the DOM
return
if not self._options.entities:
return
node = self.document._create_entity(entityName, publicId,
systemId, notationName)
if value is not None:
# internal entity
# node *should* be readonly, but we'll cheat
child = self.document.createTextNode(value)
node.childNodes.append(child)
self.document.doctype.entities._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
del self.document.doctype.entities._seq[-1]
def notation_decl_handler(self, notationName, base, systemId, publicId):
node = self.document._create_notation(notationName, publicId, systemId)
self.document.doctype.notations._seq.append(node)
if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT:
del self.document.doctype.notations._seq[-1]
def comment_handler(self, data):
node = self.document.createComment(data)
_append_child(self.curNode, node)
if self._filter and self._filter.acceptNode(node) == FILTER_REJECT:
self.curNode.removeChild(node)
def start_cdata_section_handler(self):
self._cdata = True
self._cdata_continue = False
def end_cdata_section_handler(self):
self._cdata = False
self._cdata_continue = False
def external_entity_ref_handler(self, context, base, systemId, publicId):
return 1
def first_element_handler(self, name, attributes):
if self._filter is None and not self._elem_info:
self._finish_end_element = id
self.getParser().StartElementHandler = self.start_element_handler
self.start_element_handler(name, attributes)
def start_element_handler(self, name, attributes):
node = self.document.createElement(name)
_append_child(self.curNode, node)
self.curNode = node
if attributes:
for i in range(0, len(attributes), 2):
a = minidom.Attr(attributes[i], EMPTY_NAMESPACE,
None, EMPTY_PREFIX)
value = attributes[i+1]
a.value = value
a.ownerDocument = self.document
_set_attribute_node(node, a)
if node is not self.document.documentElement:
self._finish_start_element(node)
def _finish_start_element(self, node):
if self._filter:
# To be general, we'd have to call isSameNode(), but this
# is sufficient for minidom:
if node is self.document.documentElement:
return
filt = self._filter.startContainer(node)
if filt == FILTER_REJECT:
# ignore this node & all descendents
Rejecter(self)
elif filt == FILTER_SKIP:
# ignore this node, but make its children become
# children of the parent node
Skipper(self)
else:
return
self.curNode = node.parentNode
node.parentNode.removeChild(node)
node.unlink()
# If this ever changes, Namespaces.end_element_handler() needs to
# be changed to match.
#
def end_element_handler(self, name):
curNode = self.curNode
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
def _finish_end_element(self, curNode):
info = self._elem_info.get(curNode.tagName)
if info:
self._handle_white_text_nodes(curNode, info)
if self._filter:
if curNode is self.document.documentElement:
return
if self._filter.acceptNode(curNode) == FILTER_REJECT:
self.curNode.removeChild(curNode)
curNode.unlink()
def _handle_white_text_nodes(self, node, info):
if (self._options.whitespace_in_element_content
or not info.isElementContent()):
return
# We have element type information and should remove ignorable
# whitespace; identify for text nodes which contain only
# whitespace.
L = []
for child in node.childNodes:
if child.nodeType == TEXT_NODE and not child.data.strip():
L.append(child)
# Remove ignorable whitespace from the tree.
for child in L:
node.removeChild(child)
def element_decl_handler(self, name, model):
info = self._elem_info.get(name)
if info is None:
self._elem_info[name] = ElementInfo(name, model)
else:
assert info._model is None
info._model = model
def attlist_decl_handler(self, elem, name, type, default, required):
info = self._elem_info.get(elem)
if info is None:
info = ElementInfo(elem)
self._elem_info[elem] = info
info._attr_info.append(
[None, name, None, None, default, 0, type, required])
def xml_decl_handler(self, version, encoding, standalone):
self.document.version = version
self.document.encoding = encoding
# This is still a little ugly, thanks to the pyexpat API. ;-(
if standalone >= 0:
if standalone:
self.document.standalone = True
else:
self.document.standalone = False
# Don't include FILTER_INTERRUPT, since that's checked separately
# where allowed.
_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP)
class FilterVisibilityController(object):
"""Wrapper around a DOMBuilderFilter which implements the checks
to make the whatToShow filter attribute work."""
__slots__ = 'filter',
def __init__(self, filter):
self.filter = filter
def startContainer(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.startContainer(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"startContainer() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
def acceptNode(self, node):
mask = self._nodetype_mask[node.nodeType]
if self.filter.whatToShow & mask:
val = self.filter.acceptNode(node)
if val == FILTER_INTERRUPT:
raise ParseEscape
if val == FILTER_SKIP:
# move all child nodes to the parent, and remove this node
parent = node.parentNode
for child in node.childNodes[:]:
parent.appendChild(child)
# node is handled by the caller
return FILTER_REJECT
if val not in _ALLOWED_FILTER_RETURNS:
raise ValueError(
"acceptNode() returned illegal value: " + repr(val))
return val
else:
return FILTER_ACCEPT
_nodetype_mask = {
Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT,
Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE,
Node.TEXT_NODE: NodeFilter.SHOW_TEXT,
Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION,
Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE,
Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY,
Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION,
Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT,
Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT,
Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE,
Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT,
Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION,
}
class FilterCrutch(object):
__slots__ = '_builder', '_level', '_old_start', '_old_end'
def __init__(self, builder):
self._level = 0
self._builder = builder
parser = builder._parser
self._old_start = parser.StartElementHandler
self._old_end = parser.EndElementHandler
parser.StartElementHandler = self.start_element_handler
parser.EndElementHandler = self.end_element_handler
class Rejecter(FilterCrutch):
__slots__ = ()
def __init__(self, builder):
FilterCrutch.__init__(self, builder)
parser = builder._parser
for name in ("ProcessingInstructionHandler",
"CommentHandler",
"CharacterDataHandler",
"StartCdataSectionHandler",
"EndCdataSectionHandler",
"ExternalEntityRefHandler",
):
setattr(parser, name, None)
def start_element_handler(self, *args):
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# restore the old handlers
parser = self._builder._parser
self._builder.install(parser)
parser.StartElementHandler = self._old_start
parser.EndElementHandler = self._old_end
else:
self._level = self._level - 1
class Skipper(FilterCrutch):
__slots__ = ()
def start_element_handler(self, *args):
node = self._builder.curNode
self._old_start(*args)
if self._builder.curNode is not node:
self._level = self._level + 1
def end_element_handler(self, *args):
if self._level == 0:
# We're popping back out of the node we're skipping, so we
# shouldn't need to do anything but reset the handlers.
self._builder._parser.StartElementHandler = self._old_start
self._builder._parser.EndElementHandler = self._old_end
self._builder = None
else:
self._level = self._level - 1
self._old_end(*args)
# framework document used by the fragment builder.
# Takes a string for the doctype, subset string, and namespace attrs string.
_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \
"http://xml.python.org/entities/fragment-builder/internal"
_FRAGMENT_BUILDER_TEMPLATE = (
'''\
<!DOCTYPE wrapper
%%s [
<!ENTITY fragment-builder-internal
SYSTEM "%s">
%%s
]>
<wrapper %%s
>&fragment-builder-internal;</wrapper>'''
% _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID)
class FragmentBuilder(ExpatBuilder):
"""Builder which constructs document fragments given XML source
text and a context node.
The context node is expected to provide information about the
namespace declarations which are in scope at the start of the
fragment.
"""
def __init__(self, context, options=None):
if context.nodeType == DOCUMENT_NODE:
self.originalDocument = context
self.context = context
else:
self.originalDocument = context.ownerDocument
self.context = context
ExpatBuilder.__init__(self, options)
def reset(self):
ExpatBuilder.reset(self)
self.fragment = None
def parseFile(self, file):
"""Parse a document fragment from a file object, returning the
fragment node."""
return self.parseString(file.read())
def parseString(self, string):
"""Parse a document fragment from a string, returning the
fragment node."""
self._source = string
parser = self.getParser()
doctype = self.originalDocument.doctype
ident = ""
if doctype:
subset = doctype.internalSubset or self._getDeclarations()
if doctype.publicId:
ident = ('PUBLIC "%s" "%s"'
% (doctype.publicId, doctype.systemId))
elif doctype.systemId:
ident = 'SYSTEM "%s"' % doctype.systemId
else:
subset = ""
nsattrs = self._getNSattrs() # get ns decls from node's ancestors
document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs)
try:
parser.Parse(document, 1)
except:
self.reset()
raise
fragment = self.fragment
self.reset()
## self._parser = None
return fragment
def _getDeclarations(self):
"""Re-create the internal subset from the DocumentType node.
This is only needed if we don't already have the
internalSubset as a string.
"""
doctype = self.context.ownerDocument.doctype
s = ""
if doctype:
for i in range(doctype.notations.length):
notation = doctype.notations.item(i)
if s:
s = s + "\n "
s = "%s<!NOTATION %s" % (s, notation.nodeName)
if notation.publicId:
s = '%s PUBLIC "%s"\n "%s">' \
% (s, notation.publicId, notation.systemId)
else:
s = '%s SYSTEM "%s">' % (s, notation.systemId)
for i in range(doctype.entities.length):
entity = doctype.entities.item(i)
if s:
s = s + "\n "
s = "%s<!ENTITY %s" % (s, entity.nodeName)
if entity.publicId:
s = '%s PUBLIC "%s"\n "%s"' \
% (s, entity.publicId, entity.systemId)
elif entity.systemId:
s = '%s SYSTEM "%s"' % (s, entity.systemId)
else:
s = '%s "%s"' % (s, entity.firstChild.data)
if entity.notationName:
s = "%s NOTATION %s" % (s, entity.notationName)
s = s + ">"
return s
def _getNSattrs(self):
return ""
def external_entity_ref_handler(self, context, base, systemId, publicId):
if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID:
# this entref is the one that we made to put the subtree
# in; all of our given input is parsed in here.
old_document = self.document
old_cur_node = self.curNode
parser = self._parser.ExternalEntityParserCreate(context)
# put the real document back, parse into the fragment to return
self.document = self.originalDocument
self.fragment = self.document.createDocumentFragment()
self.curNode = self.fragment
try:
parser.Parse(self._source, 1)
finally:
self.curNode = old_cur_node
self.document = old_document
self._source = None
return -1
else:
return ExpatBuilder.external_entity_ref_handler(
self, context, base, systemId, publicId)
class Namespaces:
"""Mix-in class for builders; adds support for namespaces."""
def _initNamespaces(self):
# list of (prefix, uri) ns declarations. Namespace attrs are
# constructed from this and added to the element's attrs.
self._ns_ordered_prefixes = []
def createParser(self):
"""Create a new namespace-handling parser."""
parser = expat.ParserCreate(namespace_separator=" ")
parser.namespace_prefixes = True
return parser
def install(self, parser):
"""Insert the namespace-handlers onto the parser."""
ExpatBuilder.install(self, parser)
if self._options.namespace_declarations:
parser.StartNamespaceDeclHandler = (
self.start_namespace_decl_handler)
def start_namespace_decl_handler(self, prefix, uri):
"""Push this namespace declaration on our storage."""
self._ns_ordered_prefixes.append((prefix, uri))
def start_element_handler(self, name, attributes):
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
else:
uri = EMPTY_NAMESPACE
qname = name
localname = None
prefix = EMPTY_PREFIX
node = minidom.Element(qname, uri, prefix, localname)
node.ownerDocument = self.document
_append_child(self.curNode, node)
self.curNode = node
if self._ns_ordered_prefixes:
for prefix, uri in self._ns_ordered_prefixes:
if prefix:
a = minidom.Attr(_intern(self, 'xmlns:' + prefix),
XMLNS_NAMESPACE, prefix, "xmlns")
else:
a = minidom.Attr("xmlns", XMLNS_NAMESPACE,
"xmlns", EMPTY_PREFIX)
a.value = uri
a.ownerDocument = self.document
_set_attribute_node(node, a)
del self._ns_ordered_prefixes[:]
if attributes:
node._ensure_attributes()
_attrs = node._attrs
_attrsNS = node._attrsNS
for i in range(0, len(attributes), 2):
aname = attributes[i]
value = attributes[i+1]
if ' ' in aname:
uri, localname, prefix, qname = _parse_ns_name(self, aname)
a = minidom.Attr(qname, uri, localname, prefix)
_attrs[qname] = a
_attrsNS[(uri, localname)] = a
else:
a = minidom.Attr(aname, EMPTY_NAMESPACE,
aname, EMPTY_PREFIX)
_attrs[aname] = a
_attrsNS[(EMPTY_NAMESPACE, aname)] = a
a.ownerDocument = self.document
a.value = value
a.ownerElement = node
if __debug__:
# This only adds some asserts to the original
# end_element_handler(), so we only define this when -O is not
# used. If changing one, be sure to check the other to see if
# it needs to be changed as well.
#
def end_element_handler(self, name):
curNode = self.curNode
if ' ' in name:
uri, localname, prefix, qname = _parse_ns_name(self, name)
assert (curNode.namespaceURI == uri
and curNode.localName == localname
and curNode.prefix == prefix), \
"element stack messed up! (namespace)"
else:
assert curNode.nodeName == name, \
"element stack messed up - bad nodeName"
assert curNode.namespaceURI == EMPTY_NAMESPACE, \
"element stack messed up - bad namespaceURI"
self.curNode = curNode.parentNode
self._finish_end_element(curNode)
class ExpatBuilderNS(Namespaces, ExpatBuilder):
"""Document builder that supports namespaces."""
def reset(self):
ExpatBuilder.reset(self)
self._initNamespaces()
class FragmentBuilderNS(Namespaces, FragmentBuilder):
"""Fragment builder that supports namespaces."""
def reset(self):
FragmentBuilder.reset(self)
self._initNamespaces()
def _getNSattrs(self):
"""Return string of namespace attributes from this element and
ancestors."""
# XXX This needs to be re-written to walk the ancestors of the
# context to build up the namespace information from
# declarations, elements, and attributes found in context.
# Otherwise we have to store a bunch more data on the DOM
# (though that *might* be more reliable -- not clear).
attrs = ""
context = self.context
L = []
while context:
if hasattr(context, '_ns_prefix_uri'):
for prefix, uri in context._ns_prefix_uri.items():
# add every new NS decl from context to L and attrs string
if prefix in L:
continue
L.append(prefix)
if prefix:
declname = "xmlns:" + prefix
else:
declname = "xmlns"
if attrs:
attrs = "%s\n %s='%s'" % (attrs, declname, uri)
else:
attrs = " %s='%s'" % (declname, uri)
context = context.parentNode
return attrs
class ParseEscape(Exception):
"""Exception raised to short-circuit parsing in InternalSubsetExtractor."""
pass
class InternalSubsetExtractor(ExpatBuilder):
"""XML processor which can rip out the internal document type subset."""
subset = None
def getSubset(self):
"""Return the internal subset as a string."""
return self.subset
def parseFile(self, file):
try:
ExpatBuilder.parseFile(self, file)
except ParseEscape:
pass
def parseString(self, string):
try:
ExpatBuilder.parseString(self, string)
except ParseEscape:
pass
def install(self, parser):
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler
parser.StartElementHandler = self.start_element_handler
def start_doctype_decl_handler(self, name, publicId, systemId,
has_internal_subset):
if has_internal_subset:
parser = self.getParser()
self.subset = []
parser.DefaultHandler = self.subset.append
parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler
else:
raise ParseEscape()
def end_doctype_decl_handler(self):
s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n')
self.subset = s
raise ParseEscape()
def start_element_handler(self, name, attrs):
raise ParseEscape()
def parse(file, namespaces=True):
"""Parse a document, returning the resulting Document node.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseString(string, namespaces=True):
"""Parse a document from a string, returning the resulting
Document node.
"""
if namespaces:
builder = ExpatBuilderNS()
else:
builder = ExpatBuilder()
return builder.parseString(string)
def parseFragment(file, context, namespaces=True):
"""Parse a fragment of a document, given the context from which it
was originally extracted. context should be the parent of the
node(s) which are in the fragment.
'file' may be either a file name or an open file object.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
if isinstance(file, str):
fp = open(file, 'rb')
try:
result = builder.parseFile(fp)
finally:
fp.close()
else:
result = builder.parseFile(file)
return result
def parseFragmentString(string, context, namespaces=True):
"""Parse a fragment of a document from a string, given the context
from which it was originally extracted. context should be the
parent of the node(s) which are in the fragment.
"""
if namespaces:
builder = FragmentBuilderNS(context)
else:
builder = FragmentBuilder(context)
return builder.parseString(string)
def makeBuilder(options):
"""Create a builder based on an Options object."""
if options.namespaces:
return ExpatBuilderNS(options)
else:
return ExpatBuilder(options)
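# Illustrative usage sketch (not part of the original module): the module-level
# helpers above can be used directly on a string or a file.
#
#   from xml.dom import expatbuilder
#   doc = expatbuilder.parseString("<root><child>text</child></root>")
#   print(doc.documentElement.tagName)  # -> 'root'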
| gpl-2.0 |
kwrobert/heat-templates | hot/software-config/elements/heat-config-ansible/install.d/hook-ansible.py | 6 | 3615 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import subprocess
import sys
WORKING_DIR = os.environ.get('HEAT_ANSIBLE_WORKING',
'/var/lib/heat-config/heat-config-ansible')
OUTPUTS_DIR = os.environ.get('HEAT_ANSIBLE_OUTPUTS',
'/var/run/heat-config/heat-config-ansible')
def prepare_dir(path):
if not os.path.isdir(path):
os.makedirs(path, 0o700)
def main(argv=sys.argv):
log = logging.getLogger('heat-config')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
logging.Formatter(
'[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
log.addHandler(handler)
log.setLevel('DEBUG')
prepare_dir(OUTPUTS_DIR)
prepare_dir(WORKING_DIR)
os.chdir(WORKING_DIR)
c = json.load(sys.stdin)
variables = {}
for input in c['inputs']:
variables[input['name']] = input.get('value', '')
fn = os.path.join(WORKING_DIR, '%s_playbook.yaml' % c['id'])
vars_filename = os.path.join(WORKING_DIR, '%s_variables.json' % c['id'])
heat_outputs_path = os.path.join(OUTPUTS_DIR, c['id'])
variables['heat_outputs_path'] = heat_outputs_path
config_text = c.get('config', '')
if not config_text:
log.warn("No 'config' input found, nothing to do.")
return
# Write 'variables' to file
with os.fdopen(os.open(
vars_filename, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as var_file:
json.dump(variables, var_file)
# Write the executable, 'config', to file
with os.fdopen(os.open(fn, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
f.write(c.get('config', '').encode('utf-8'))
cmd = [
'ansible-playbook',
'-i',
'localhost,',
fn,
'--extra-vars',
'@%s' % vars_filename
]
log.debug('Running %s' % (' '.join(cmd),))
try:
subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
log.warn("ansible not installed yet")
return
stdout, stderr = subproc.communicate()
log.info('Return code %s' % subproc.returncode)
if stdout:
log.info(stdout)
if stderr:
log.info(stderr)
# TODO(stevebaker): Test if ansible returns any non-zero
# return codes in success.
if subproc.returncode:
log.error("Error running %s. [%s]\n" % (fn, subproc.returncode))
else:
log.info('Completed %s' % fn)
response = {}
for output in c.get('outputs') or []:
output_name = output['name']
try:
with open('%s.%s' % (heat_outputs_path, output_name)) as out:
response[output_name] = out.read()
except IOError:
pass
response.update({
'deploy_stdout': stdout,
'deploy_stderr': stderr,
'deploy_status_code': subproc.returncode,
})
json.dump(response, sys.stdout)
if __name__ == '__main__':
sys.exit(main(sys.argv))
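# Illustrative only (not part of the original hook): a minimal sketch of the
# JSON document this hook reads from stdin, inferred from the fields used
# above (id, inputs, config, outputs). The playbook text and names are
# hypothetical.
#
#   {
#     "id": "1234abcd",
#     "inputs": [{"name": "greeting", "value": "hello"}],
#     "outputs": [{"name": "result"}],
#     "config": "- hosts: localhost\n  tasks:\n    - shell: echo {{ greeting }} > {{ heat_outputs_path }}.result\n"
#   }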
| apache-2.0 |
palashahuja/myhdl | myhdl/test/core/test_misc.py | 3 | 1922 | # This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Run the unit tests for Signal """
import random
from random import randrange
random.seed(1) # random, but deterministic
from types import GeneratorType
import unittest
from unittest import TestCase
from myhdl import instance, instances
def A(n):
@instance
def logic():
yield None
return logic
def B(n):
@instance
def logic():
yield None
return logic
def C(n):
A_1 = A(1)
A_2 = A(2)
B_1 = B(1)
return A_1, A_2, B_1
g = 3
class InstancesTest(TestCase):
def testInstances(self):
@instance
def D_1():
yield None
d = 1
A_1 = A(1)
a = [1, 2]
B_1 = B(1)
b = "string"
C_1 = C(1)
c = {}
i = instances()
# can't just construct an expected list;
# that would become part of the instances also!
self.assertEqual(len(i), 4)
for e in (D_1, A_1, B_1, C_1):
self.assert_(e in i)
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 |
Azenwrath/codeguild-labs | lab_distance_converstion.py | 1 | 1643 | #Lab: Distance Converter
#Student: Dana Stubkjaer
def convert(distance, unit1, unit2):
if unit1 == "mi":
if unit2 == "mi":
print (distance + " " + unit2)
if unit2 == "km":
print ((float(distance) * 1.60934))
if unit2 == "ft":
print ((float(distance) * 5280))
if unit2 == "m":
print ((float(distance) * 1609.34))
if unit1 == "km":
if unit2 == "km":
print (distance + " " + unit2)
if unit2 == "mi":
print ((float(distance) / 1.60934))
if unit2 == "ft":
print ((float(distance) * 5280))
if unit2 == "m":
print ((float(distance) * 1609.34))
if unit1 == "ft":
if unit2 == "ft":
print (distance + " " + unit2)
if unit2 == "mi":
print ((float(distance) * 0.000189394))
if unit2 == "km":
print ((float(distance) * 0.0003048))
if unit2 == "m":
print ((float(distance) * 0.3048))
if unit1 == "m":
if unit2 == "m":
print (distance + " " + unit2)
if unit2 == "mi":
print ((float(distance) * 0.000621371))
if unit2 == "ft":
print ((float(distance) * 3.28084))
if unit2 == "km":
print ((float(distance) * 0.001))
distance = ""
unit1 = ""
unit2 = ""
distance = input("Please enter a distance: ")
unit1 = input ("Please enter the unit of distance: ")
unit2 = input("Please enter the desired unit of conversion: ")
convert(distance, unit1, unit2)
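# Example checks (assuming the corrected conversion factors above):
#   convert("10", "km", "m")  -> prints 10000.0
#   convert("1", "mi", "ft")  -> prints 5280.0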
| gpl-3.0 |
spahan/unixdmoain | admin/janitor.py | 1 | 3452 | #!/usr/bin/env python2
# coding: utf-8
# THIS SOFTWARE IS LICENSED UNDER THE BSD LICENSE CONDITIONS.
# FOR LICENCE DETAILS SEE share/LICENSE.TXT
#
# (c) 2005-2009, Marco Hoehle <[email protected]>
# (c) 2010, Hanspeter Spalinger <[email protected]>
"""
housekeeping jobs, run this script as cronjob.
Do not forget to change KEYTAB to the location where
your janitor.keytab file is.
"""
from UniDomain import Classes
import re
import sys
import ldap  # python-ldap; needed for ldap.SCOPE_SUBTREE in detect_bad_hosts
def detect_bad_hosts(authen, db):
"""Searches for hosts which are missing from ldap or kerberos.
returns a array with problems."""
problems = []
krb_result = authen.list_hosts()
ldap_result = db.conn.search_s(config.ldapbase, ldap.SCOPE_SUBTREE, '(ObjectClass=udHost)', ['USID', 'FQDN', 'cn'])
ldap_hosts = set()
for id,atts in ldap_result:
# check primary attributes have single values. multiple ones indicate a unsuccessfull copy.
for at in atts:
if len(atts[at]) != 1:
problems.append( "Warning: Host %s has multiple %s Attributes!" % (id,at) )
if not id.startswith('cn=%s,' % atts['cn'][0]):
problems.append( "Warning: Host id and cn differ for %s!" % id )
if not atts['FQDN'][0].startswith('%s.' % atts['cn'][0]):
problems.append( "Warning: FQDN (%s) does not start with hostname (%s) for %s!" % (atts['FQDN'][0],atts['cn'][0],id) )
if not atts['FQDN'][0].endswith('.unibas.ch'):
problems.append( "Info: Host %s (%s) is not in domain unibas.ch." % (id, atts['FQDN'][0]) )
if not atts['USID'][0].startswith('host/%s@' % atts['FQDN'][0]):
problems.append( "Warning: Host USID (%s) and hostname (%s) different for %s!" % (atts['USID'][0], atts['cn'][0], id) )
if atts['FQDN'][0] in ldap_hosts:
problems.append( "ERROR!!: FQDN of %s (%s) is already taken by another host!" % (id, atts['FQDN'][0]) )
else:
ldap_hosts.add(atts['FQDN'][0])
krb_hosts = set()
for host in krb_result:
mo = re.match(r'host/([a-z0-9-.]*\.unibas\.ch)@UD.UNIBAS.CH', host)
if mo:
krb_hosts.add(mo.group(1))
else:
problems.append( "Warning: bad principal name for %s." % host )
for bad in krb_hosts-ldap_hosts:
problems.append( "Warning: host %s in kerberos but not in ldap!" % bad )
for bad in ldap_hosts-krb_hosts:
problems.append( "Warning: host %s in ldap but not in kerberos!" % bad )
return problems
def main():
config = Classes.Config(krb5keytab="/root/janitor/janitor.keytab",plugin_author='ldapdbadmin')
authen = Classes.Authen(config)
if not authen:
print "bad auth"
return
userid = authen.authenticate(user='janitor/admin')
if not userid: return
authen.kadmin()
author = Classes.Author(config)
db = author.authorize('janitor/admin')
config = Classes.Config(krb5keytab="/root/janitor/janitor.keytab",plugin_author='ldapdbadmin')
authen = Classes.Authen(config)
if not authen:
sys.exit(3)
userid = authen.authenticate()
if not userid:
sys.exit(4)
authen.kadmin()
author = Classes.Author(config)
if not author:
sys.exit(3)
db = author.authorize(userid.split('@')[0])
if not db:
sys.exit(4)
db.update_dnsSOA()
#FIXME: implement this.
#roger.search_expiredHosts()
if __name__ == "__main__":
main()
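# Illustrative cron entry (assumption: schedule and path are examples only); the
# module docstring above asks for this script to be run as a cronjob:
#   30 3 * * *  root  /path/to/janitor.py >> /var/log/ud-janitor.log 2>&1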
| bsd-3-clause |
DANA-Laboratory/CoolProp | Web/scripts/fluid_properties.Incompressibles.py | 3 | 6445 | from __future__ import print_function, division
import os.path
import CoolProp
import CoolProp.CoolProp
import subprocess
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg') #Force mpl to use a non-GUI backend
import matplotlib.pyplot as plt
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
#plots_path = os.path.join(web_dir,'fluid_properties','incompressibles_consistency')
plots_path = os.path.join(web_dir,'scripts','incompressibles_consistency')
checked = ["TVP1869", "T66"]
N = 50
p = 100e5
Pr = np.empty(N)
la = np.empty(N)
mu = np.empty(N)
cp = np.empty(N)
fig = plt.figure(tight_layout=True)
Pr_axis = fig.add_subplot(221)
la_axis = fig.add_subplot(222)
mu_axis = fig.add_subplot(223)
cp_axis = fig.add_subplot(224)
#Pr_axis = plt.subplot2grid((3,2), (0,0), rowspan=3)
#la_axis = plt.subplot2grid((3,2), (0,1))
#mu_axis = plt.subplot2grid((3,2), (1,1))
#cp_axis = plt.subplot2grid((3,2), (2,1))
Pr_axis.set_xlabel("Temperature $T$ / deg C")
Pr_axis.set_ylabel("Prandtl Number $Pr$")
#Pr_axis.set_ylim([0,10000])
#Pr_axis.set_yscale("log")
la_axis.set_xlabel("Temperature $T$ / deg C")
la_axis.set_ylabel(r"Thermal Conductivity $\lambda$ / W/m/K")
#la_axis.set_ylim([0,1])
mu_axis.set_xlabel("Temperature $T$ / deg C")
mu_axis.set_ylabel(r"Dynamic Viscosity $\mu$ / Pa s")
#mu_axis.set_ylim([0,1])
#mu_axis.set_yscale("log")
cp_axis.set_xlabel("Temperature $T$ / deg C")
cp_axis.set_ylabel("Isobaric Heat Capacity $c_p$ / J/kg/K")
#cp_axis.set_ylim([0,5000])
for fluid in CoolProp.__incompressibles_pure__ + CoolProp.__incompressibles_solution__:
#for fluid in CoolProp.__incompressibles_solution__:
#for fluid in CoolProp.__incompressibles_pure__:
skip_fluid = False
for ignored in ["example","iceea","icena","icepg"]:
if ignored in fluid.lower():
skip_fluid = True
if skip_fluid:
continue
state = CoolProp.AbstractState("INCOMP",fluid)
error = ""
for frac in [0.5,0.2,0.8,0.1,0.9]:
error = ""
try:
state.set_mass_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
            error = str(e)
try:
state.set_volu_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
                error = str(e)
try:
state.set_mole_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
                    error = str(e)
pass
Tmin = 0.0
try:
Tmin = state.keyed_output(CoolProp.iT_freeze)
except:
pass
Tmin = max(state.Tmin(), Tmin)+1
Tmax = state.Tmax()
T = np.linspace(Tmin,Tmax, N)
for i, Ti in enumerate(T):
state.update(CoolProp.PT_INPUTS, p, Ti)
Pr[i] = state.Prandtl()
la[i] = state.conductivity()
mu[i] = state.viscosity()
cp[i] = state.cpmass()
#print(np.min(Pr), np.max(Pr))
Pr_axis.plot(T-273.15,Pr)
la_axis.plot(T-273.15,la)
mu_axis.plot(T-273.15,mu)
cp_axis.plot(T-273.15,cp)
if np.max(Pr)>10000:
if fluid not in checked:
print("Very high Prandtl number for {0:s} of {1:f}".format(fluid,np.max(Pr)))
if np.min(Pr)<0.0:
if fluid not in checked:
print("Very low Prandtl number for {0:s} of {1:f}".format(fluid,np.min(Pr)))
if np.max(la)>0.8:
if fluid not in checked:
print("Very high thermal conductivity for {0:s} of {1:f}".format(fluid,np.max(la)))
if np.min(la)<0.3:
if fluid not in checked:
print("Very low thermal conductivity for {0:s} of {1:f}".format(fluid,np.min(la)))
if np.max(mu)>0.2:
if fluid not in checked:
print("Very high viscosity for {0:s} of {1:f}".format(fluid,np.max(mu)))
if np.min(mu)<1e-8:
if fluid not in checked:
print("Very low viscosity for {0:s} of {1:f}".format(fluid,np.min(mu)))
if np.max(cp)>5000:
if fluid not in checked:
print("Very high heat capacity for {0:s} of {1:f}".format(fluid,np.max(cp)))
if np.min(cp)<1000:
if fluid not in checked:
print("Very low heat capacity for {0:s} of {1:f}".format(fluid,np.min(cp)))
#for fluid in CoolProp.__fluids__:
for fluid in ["Water"]:
state = CoolProp.AbstractState("HEOS",fluid)
Tmin = max(state.Tmin(), Pr_axis.get_xlim()[0]+273.15)
Tmax = min(state.Tmax(), Pr_axis.get_xlim()[1]+273.15)
T = np.linspace(Tmin, Tmax, N)
for i, Ti in enumerate(T):
try:
state.update(CoolProp.QT_INPUTS, 0, Ti)
p = state.p() + 1e5
except:
p = state.p_critical() + 1e5
Pr[i] = np.nan
la[i] = np.nan
mu[i] = np.nan
cp[i] = np.nan
try:
state.update(CoolProp.PT_INPUTS, p, Ti)
try:
Pr[i] = state.Prandtl()
except Exception as e:
                print(str(e))
try:
la[i] = state.conductivity()
except Exception as e:
                print(str(e))
try:
mu[i] = state.viscosity()
except Exception as e:
                print(str(e))
try:
cp[i] = state.cpmass()
except Exception as e:
                print(str(e))
except:
pass
#print(np.min(Pr), np.max(Pr))
if np.sum(np.isnan(Pr)) == 0:
Pr_axis.plot(T-273.15,Pr,alpha=0.5,ls=":")
else:
#print("Error: Prandtl undefined for "+fluid)
pass
if np.sum(np.isnan(la)) == 0:
la_axis.plot(T-273.15,la,alpha=0.5,ls=":")
else:
        #print("Error: Conductivity undefined for "+fluid)
pass
if np.sum(np.isnan(mu)) == 0:
mu_axis.plot(T-273.15,mu,alpha=0.5,ls=":")
else:
#print("Error: Viscosity undefined for "+fluid)
pass
if np.sum(np.isnan(cp)) == 0:
cp_axis.plot(T-273.15,cp,alpha=0.5,ls=":")
else:
#print("Error: Heat capacity undefined for "+fluid)
pass
fig.tight_layout()
fig.savefig(plots_path+'.pdf')
#fig.savefig(plots_path+'.png')
sys.exit(0)
| mit |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob/samples/blob_samples_copy_blob.py | 1 | 1906 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: blob_samples_copy_blob.py
DESCRIPTION:
This sample demos how to copy a blob from a URL.
USAGE: python blob_samples_copy_blob.py
Set the environment variables with your own values before running the sample.
1) AZURE_STORAGE_CONNECTION_STRING - the connection string to your storage account
"""
from __future__ import print_function
import os
import sys
import time
from azure.storage.blob import BlobServiceClient
def main():
try:
CONNECTION_STRING = os.environ['AZURE_STORAGE_CONNECTION_STRING']
except KeyError:
print("AZURE_STORAGE_CONNECTION_STRING must be set.")
sys.exit(1)
status = None
blob_service_client = BlobServiceClient.from_connection_string(CONNECTION_STRING)
source_blob = "https://www.gutenberg.org/files/59466/59466-0.txt"
copied_blob = blob_service_client.get_blob_client("mycontainer", '59466-0.txt')
# Copy started
copied_blob.start_copy_from_url(source_blob)
for i in range(10):
props = copied_blob.get_blob_properties()
status = props.copy.status
print("Copy status: " + status)
if status == "success":
# Copy finished
break
time.sleep(10)
if status != "success":
# if not finished after 100s, cancel the operation
props = copied_blob.get_blob_properties()
print(props.copy.status)
copy_id = props.copy.id
copied_blob.abort_copy(copy_id)
props = copied_blob.get_blob_properties()
print(props.copy.status)
if __name__ == "__main__":
main()
| mit |
davenovak/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/hard_dependency/gyptest-no-exported-hard-dependency.py | 350 | 1226 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a hard_dependency that is not exported is not pulled in as a
dependency for a target if the target does not explicitly specify a dependency
and none of its dependencies export the hard_dependency.
"""
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'dump_dependency_json':
test.skip_test('Skipping test; dependency JSON does not adjust ' \
                  'static libraries.\n')
test.run_gyp('hard_dependency.gyp', chdir='src')
chdir = 'relocate/src'
test.relocate('src', chdir)
test.build('hard_dependency.gyp', 'd', chdir=chdir)
# Because 'c' does not export a hard_dependency, only the target 'd' should
# be built. This is because the 'd' target does not need the generated headers
# in order to be compiled.
test.built_file_must_not_exist('a', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_not_exist('c', type=test.STATIC_LIB, chdir=chdir)
test.built_file_must_exist('d', type=test.STATIC_LIB, chdir=chdir)
test.pass_test()
| gpl-3.0 |
danakj/chromium | third_party/logilab/logilab/common/interface.py | 137 | 2593 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Bases class for interfaces to provide 'light' interface handling.
TODO:
_ implement a check method which checks that an object implements the
interface
_ Attribute objects
This module requires at least python 2.2
"""
__docformat__ = "restructuredtext en"
class Interface(object):
"""Base class for interfaces."""
def is_implemented_by(cls, instance):
return implements(instance, cls)
is_implemented_by = classmethod(is_implemented_by)
def implements(obj, interface):
    """Return true if the given object (maybe an instance or class) implements
the interface.
"""
kimplements = getattr(obj, '__implements__', ())
if not isinstance(kimplements, (list, tuple)):
kimplements = (kimplements,)
for implementedinterface in kimplements:
if issubclass(implementedinterface, interface):
return True
return False
def extend(klass, interface, _recurs=False):
"""Add interface to klass'__implements__ if not already implemented in.
If klass is subclassed, ensure subclasses __implements__ it as well.
    NOTE: klass should be a new class.
"""
if not implements(klass, interface):
try:
kimplements = klass.__implements__
kimplementsklass = type(kimplements)
kimplements = list(kimplements)
except AttributeError:
kimplementsklass = tuple
kimplements = []
kimplements.append(interface)
klass.__implements__ = kimplementsklass(kimplements)
for subklass in klass.__subclasses__():
extend(subklass, interface, _recurs=True)
elif _recurs:
for subklass in klass.__subclasses__():
extend(subklass, interface, _recurs=True)
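if __name__ == '__main__':
    # Minimal usage sketch (assumption: this demo block is not part of the
    # original module; the class names below are made up for illustration).
    class ISerializable(Interface):
        """Example marker interface."""
    class Document(object):
        __implements__ = (ISerializable,)
    print(implements(Document, ISerializable))           # True
    print(ISerializable.is_implemented_by(Document()))   # True
    class Report(object):
        pass
    extend(Report, ISerializable)                         # register after the fact
    print(implements(Report, ISerializable))              # True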
| bsd-3-clause |
kingvuplus/BH-SH4 | lib/python/Components/Sources/List.py | 39 | 2840 | from Source import Source
from Components.Element import cached
class List(Source, object):
"""The datasource of a listbox. Currently, the format depends on the used converter. So
if you put a simple string list in here, you need to use a StringList converter, if you are
using a "multi content list styled"-list, you need to use the StaticMultiList converter, and
setup the "fonts".
This has been done so another converter could convert the list to a different format, for example
to generate HTML."""
def __init__(self, list = [ ], enableWrapAround = False, item_height = 25, fonts = [ ]):
Source.__init__(self)
self.__list = list
self.onSelectionChanged = [ ]
self.item_height = item_height
self.fonts = fonts
self.disable_callbacks = False
self.enableWrapAround = enableWrapAround
self.__style = "default" # style might be an optional string which can be used to define different visualisations in the skin
def setList(self, list):
self.__list = list
self.changed((self.CHANGED_ALL,))
list = property(lambda self: self.__list, setList)
def entry_changed(self, index):
if not self.disable_callbacks:
self.downstream_elements.entry_changed(index)
def modifyEntry(self, index, data):
self.__list[index] = data
self.entry_changed(index)
def count(self):
return len(self.__list)
def selectionChanged(self, index):
if self.disable_callbacks:
return
# update all non-master targets
for x in self.downstream_elements:
if x is not self.master:
x.index = index
for x in self.onSelectionChanged:
x()
@cached
def getCurrent(self):
return self.master is not None and self.master.current
current = property(getCurrent)
def setIndex(self, index):
if self.master is not None:
self.master.index = index
self.selectionChanged(index)
@cached
def getIndex(self):
if self.master is not None:
return self.master.index
else:
return None
setCurrentIndex = setIndex
index = property(getIndex, setIndex)
def selectNext(self):
if self.getIndex() + 1 >= self.count():
if self.enableWrapAround:
self.index = 0
else:
self.index += 1
self.setIndex(self.index)
def selectPrevious(self):
if self.getIndex() - 1 < 0:
if self.enableWrapAround:
self.index = self.count() - 1
else:
self.index -= 1
self.setIndex(self.index)
@cached
def getStyle(self):
return self.__style
def setStyle(self, style):
if self.__style != style:
self.__style = style
self.changed((self.CHANGED_SPECIFIC, "style"))
style = property(getStyle, setStyle)
def updateList(self, list):
"""Changes the list without changing the selection or emitting changed Events"""
assert len(list) == len(self.__list)
old_index = self.index
self.disable_callbacks = True
self.list = list
self.index = old_index
self.disable_callbacks = False
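# Minimal usage sketch (assumption: illustrative only, not part of the original
# module; it presumes an Enigma2 screen whose skin attaches a converter such as
# StringList or StaticMultiList to this source, and the entries are examples):
#   self["menu"] = List(["first entry", "second entry"], enableWrapAround=True)
#   self["menu"].setList(new_entries)     # replace the content (CHANGED_ALL)
#   self["menu"].updateList(new_entries)  # same length, keeps the selection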
| gpl-2.0 |
SzTk/Get-Mid-Point | get_mid_point/geocoding.py | 1 | 1561 | #coding: UTF-8
import sys
import traceback
from pygmapslib import PyGMaps, PyGMapsError
__all__ = ['GeocodingError', 'Geocoding', 'request']
class GeocodingError(Exception):
def __init__(self, error_status, params):
self.error_status = error_status
self.params = params
def __str__(self):
return self.error_status + '\n' + str(self.params)
def __unicode__(self):
return unicode(self.__str__())
class Geocoding(object):
def __init__(self, data):
self.data = data
def __unicode__(self):
addresses = ''
for result in self.data:
addresses = addresses + result['formatted_address'] + '\n'
return addresses
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
def request(address, sensor='false', gmaps = None):
query_url = 'https://maps.googleapis.com/maps/api/geocode/json?'
params = {
'address': address,
'sensor' : sensor
}
try:
if gmaps is None:
gmap_result = PyGMaps().get_data(query_url, params)
else:
gmap_result = gmaps.get_data(query_url, params)
except PyGMapsError as e:
        print(traceback.format_exc())
raise GeocodingError('HTTP STATUS ERROR', params)
if gmap_result['status'] != 'OK':
raise GeocodingError(gmap_result['status'], params)
return Geocoding(gmap_result['results'])
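if __name__ == '__main__':
    # Minimal usage sketch (assumption: this demo block is not part of the
    # original module). It performs a live request through PyGMaps to the
    # Google Geocoding API, so it is illustrative only; the address is an
    # arbitrary example.
    try:
        print(request("Petersplatz 1, 4001 Basel"))
    except GeocodingError as error:
        print(error)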
| lgpl-3.0 |
MakeHer/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_git_export.py | 66 | 7362 | """
Unittests for exporting to git via management command.
"""
import copy
import os
import shutil
import StringIO
import subprocess
import unittest
from uuid import uuid4
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test.utils import override_settings
from contentstore.tests.utils import CourseTestCase
import contentstore.git_export_utils as git_export_utils
from contentstore.git_export_utils import GitExportError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
FEATURES_WITH_EXPORT_GIT = settings.FEATURES.copy()
FEATURES_WITH_EXPORT_GIT['ENABLE_EXPORT_GIT'] = True
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
@override_settings(FEATURES=FEATURES_WITH_EXPORT_GIT)
class TestGitExport(CourseTestCase):
"""
    Exercise the git_export django management command with various inputs.
"""
def setUp(self):
"""
Create/reinitialize bare repo and folders needed
"""
super(TestGitExport, self).setUp()
if not os.path.isdir(git_export_utils.GIT_REPO_EXPORT_DIR):
os.mkdir(git_export_utils.GIT_REPO_EXPORT_DIR)
self.addCleanup(shutil.rmtree, git_export_utils.GIT_REPO_EXPORT_DIR)
self.bare_repo_dir = '{0}/data/test_bare.git'.format(
os.path.abspath(settings.TEST_ROOT))
if not os.path.isdir(self.bare_repo_dir):
os.mkdir(self.bare_repo_dir)
self.addCleanup(shutil.rmtree, self.bare_repo_dir)
subprocess.check_output(['git', '--bare', 'init'],
cwd=self.bare_repo_dir)
def test_command(self):
"""
Test that the command interface works. Ignore stderr for clean
test output.
"""
with self.assertRaisesRegexp(CommandError, 'This script requires.*'):
call_command('git_export', 'blah', 'blah', 'blah', stderr=StringIO.StringIO())
with self.assertRaisesRegexp(CommandError, 'This script requires.*'):
call_command('git_export', stderr=StringIO.StringIO())
# Send bad url to get course not exported
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.URL_BAD)):
call_command('git_export', 'foo/bar/baz', 'silly', stderr=StringIO.StringIO())
# Send bad course_id to get course not exported
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.BAD_COURSE)):
call_command('git_export', 'foo/bar:baz', 'silly', stderr=StringIO.StringIO())
def test_error_output(self):
"""
Verify that error output is actually resolved as the correct string
"""
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.BAD_COURSE)):
call_command(
'git_export', 'foo/bar:baz', 'silly'
)
with self.assertRaisesRegexp(CommandError, unicode(GitExportError.URL_BAD)):
call_command(
'git_export', 'foo/bar/baz', 'silly'
)
def test_bad_git_url(self):
"""
Test several bad URLs for validation
"""
course_key = SlashSeparatedCourseKey('org', 'course', 'run')
with self.assertRaisesRegexp(GitExportError, unicode(GitExportError.URL_BAD)):
git_export_utils.export_to_git(course_key, 'Sillyness')
with self.assertRaisesRegexp(GitExportError, unicode(GitExportError.URL_BAD)):
git_export_utils.export_to_git(course_key, 'example.com:edx/notreal')
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.URL_NO_AUTH)):
git_export_utils.export_to_git(course_key, 'http://blah')
def test_bad_git_repos(self):
"""
Test invalid git repos
"""
test_repo_path = '{}/test_repo'.format(git_export_utils.GIT_REPO_EXPORT_DIR)
self.assertFalse(os.path.isdir(test_repo_path))
course_key = SlashSeparatedCourseKey('foo', 'blah', '100-')
# Test bad clones
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.CANNOT_PULL)):
git_export_utils.export_to_git(
course_key,
'https://user:[email protected]/test_repo.git')
self.assertFalse(os.path.isdir(test_repo_path))
# Setup good repo with bad course to test xml export
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.XML_EXPORT_FAIL)):
git_export_utils.export_to_git(
course_key,
'file://{0}'.format(self.bare_repo_dir))
# Test bad git remote after successful clone
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.CANNOT_PULL)):
git_export_utils.export_to_git(
course_key,
'https://user:[email protected]/r.git')
@unittest.skipIf(os.environ.get('GIT_CONFIG') or
os.environ.get('GIT_AUTHOR_EMAIL') or
os.environ.get('GIT_AUTHOR_NAME') or
os.environ.get('GIT_COMMITTER_EMAIL') or
os.environ.get('GIT_COMMITTER_NAME'),
'Global git override set')
def test_git_ident(self):
"""
Test valid course with and without user specified.
Test skipped if git global config override environment variable GIT_CONFIG
is set.
"""
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir),
'enigma'
)
expect_string = '{0}|{1}\n'.format(
git_export_utils.GIT_EXPORT_DEFAULT_IDENT['name'],
git_export_utils.GIT_EXPORT_DEFAULT_IDENT['email']
)
cwd = os.path.abspath(git_export_utils.GIT_REPO_EXPORT_DIR / 'test_bare')
git_log = subprocess.check_output(['git', 'log', '-1',
'--format=%an|%ae'], cwd=cwd)
self.assertEqual(expect_string, git_log)
# Make changes to course so there is something to commit
self.populate_course()
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir),
self.user.username
)
expect_string = '{0}|{1}\n'.format(
self.user.username,
self.user.email,
)
git_log = subprocess.check_output(
['git', 'log', '-1', '--format=%an|%ae'], cwd=cwd)
self.assertEqual(expect_string, git_log)
def test_no_change(self):
"""
Test response if there are no changes
"""
git_export_utils.export_to_git(
self.course.id,
'file://{0}'.format(self.bare_repo_dir)
)
with self.assertRaisesRegexp(GitExportError,
unicode(GitExportError.CANNOT_COMMIT)):
git_export_utils.export_to_git(
self.course.id, 'file://{0}'.format(self.bare_repo_dir))
| agpl-3.0 |
Endika/c2c-rd-addons | c2c_account_payment_extension/wizard/__init__.py | 4 | 1478 | # -*- coding: utf-8 -*-
##############################################
#
# Swing Entwicklung betrieblicher Informationssysteme GmbH
# (<http://www.swing-system.com>)
# Copyright (C) ChriCar Beteiligungs- und Beratungs- GmbH
# all rights reserved
# 08-JUN-2012 (GK) created
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
#    59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################
import account_payment_order
| agpl-3.0 |
trishnaguha/ansible | lib/ansible/modules/network/checkpoint/checkpoint_run_script.py | 30 | 3057 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: checkpoint_run_script
short_description: Run scripts on Checkpoint devices over Web Services API
description:
- Run scripts on Checkpoint devices.
All operations are performed over Web Services API.
version_added: "2.8"
author: "Ansible by Red Hat (@rcarrillocruz)"
options:
script_name:
description:
- Name of the script.
type: str
required: True
script:
description:
- Script body contents.
type: str
required: True
targets:
description:
- Targets the script should be run against. Can reference either name or UID.
type: list
required: True
"""
EXAMPLES = """
- name: Run script
checkpoint_run_script:
script_name: "List root"
script: ls -l /
targets:
- mycheckpointgw
"""
RETURN = """
checkpoint_run_script:
description: The checkpoint run script output.
returned: always.
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.checkpoint.checkpoint import publish, install_policy
import json
def run_script(module, connection):
script_name = module.params['script_name']
script = module.params['script']
targets = module.params['targets']
payload = {'script-name': script_name,
'script': script,
'targets': targets}
code, response = connection.send_request('/web_api/run-script', payload)
return code, response
def main():
argument_spec = dict(
script_name=dict(type='str', required=True),
script=dict(type='str', required=True),
targets=dict(type='list', required=True)
)
module = AnsibleModule(argument_spec=argument_spec)
connection = Connection(module._socket_path)
code, response = run_script(module, connection)
result = {'changed': True}
if code == 200:
result['checkpoint_run_script'] = response
else:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
hpcugent/vsc-ldap | lib/vsc/ldap/filters.py | 1 | 9999 | # -*- coding: latin-1 -*-
#
# Copyright 2009-2021 Ghent University
#
# This file is part of vsc-ldap,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# the Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/hpcugent/vsc-ldap
#
# vsc-ldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# vsc-ldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vsc-ldap. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module contains classes that allow constructing filters for an LDAP search in
a straightforward and intuitive manner.
@author: Andy Georges
@author: Stijn De Weirdt
Examples:
from vsc.ldap.filter import LdapFilter
>>> f = LdapFilter("x=4")
>>> g = LdapFilter("y=5")
>>> h = LdapFilter("z=3")
>>> print "f = %s" % f
f = (x=4)
>>> print "g = %s" % g
g = (y=5)
>>> print "h = %s" % h
h = (z=3)
>>> print "f & g -> %s" % (f & g)
f & g -> (&(x=4)(y=5))
>>> print "f -> %s" % f
f -> (x=4)
>>> print "g -> %s" % g
g -> (y=5)
>>> print "(f & g) | h -> %s" % ((f & g) | h)
(f & g) | h -> (|(&(x=4)(y=5))(z=3))
>>> print "f & g | h -> %s" % (f & g | h)
f & g | h -> (|(&(x=4)(y=5))(z=3))
>>> print "f & (g | h) -> %s" % (f & (g | h))
f & (g | h) -> (&(x=4)(|(y=5)(z=3)))
>>> print "f & g & h -> %s" % (f & g & h)
f & g & h -> (&(x=4)(y=5)(z=3))
>>> print "f & g & h | f -> %s" % (f & g & h | f)
f & g & h | f -> (|(&(x=4)(y=5)(z=3))(x=4))
>>> print "! f -> %s" % (f.negate())
! f -> (!(x=4) )
>>> print "fold & [f,g,h] -> %s" % LdapFilter.from_list(lambda x, y: x & y, [f, g, h])
fold & [f,g,h] -> (& (x=4) (y=5) (z=3))
>>> print "fold | [f,g,h] -> %s" % LdapFilter.from_list(lambda x, y: x | y, [f, g, h])
fold | [f,g,h] -> (| (x=4) (y=5) (z=3))
>>> print "fold & [f,g,h, g=1] -> %s" % LdapFilter.from_list(lambda x, y: x & y, [f, g, h, "g=1"])
fold & [f,g,h, g=1] -> (& (x=4) (y=5) (z=3) (g=1))
"""
import copy
from functools import reduce
from vsc.utils.timestamp import convert_timestamp
class LdapFilterError(Exception):
pass
class LdapFilter(object):
"""Representing an LDAP filter with operators between the filter values.
This is implemented as a tree, where the nodes are the operations, e.g.,
and, or, ... and the leaves are the values to finally concatenate to
a single filter when printing out the tree.
    If you have multiple key value pairs that you wish to concatenate using a single
operator, for example to take the AND of them, the static from_list method will do
just that.
Note that for usage in a search, the resulting filter should be transformed into a
string, if the tools are not doing that automagically :)
Note that all operations are left associative.
"""
def __init__(self, value):
"""Initialises the filter with a single value to filter on."""
self.root = value
self.left = None
self.right = None
@staticmethod
def from_list(operator, ls):
"""Turns the given list into a filter using the given operator as the combinator.
@returns: LdapFilter instance representing the filter.
"""
if ls and len(ls) > 0:
if not isinstance(ls[0], LdapFilter):
initialiser = LdapFilter(ls[0])
else:
initialiser = ls[0]
return reduce(lambda x, y: operator(x, y), ls[1:], initialiser)
else:
raise LdapFilterError()
def __and__(self, value):
"""Return a new filter that is the logical and operator of this filter and the provided value.
        It merges the current filter with the value. The current filter becomes the
left subtree of the new filter, the value becomes the right subtree.
@type value: This can be a string or an LdapFilter instance. In the former case,
first a new LdapFilter instance is made, such that all leaves are
actually LdapFilter instances.
@returns: the new filter instance
"""
if not isinstance(value, LdapFilter):
value = LdapFilter(value)
elif self == value:
value = copy.deepcopy(self)
return self._combine("&", value)
def __or__(self, value):
"""Return a new filter that is the logical or operator of this filter and the provided value.
        It merges the current filter with the value. The current filter becomes the
left subtree of the new filter, the value becomes the right subtree.
@type value: This can be a string or an LdapFilter instance. In the former case,
first a new LdapFilter instance is made, such that all leaves are
actually LdapFilter instances.
@returns: the new filter instance
"""
if not isinstance(value, LdapFilter):
value = LdapFilter(value)
elif self == value:
value = copy.deepcopy(self)
return self._combine("|", value)
def negate(self):
"""Return a new filter that represents the negation of the current filter.
@returns: the new filter instance
"""
return self._combine("!", None)
def __str__(self):
"""Converts the LdapFilter instance to a string."""
return self._to_string()
def _to_string(self, previous_operator=None):
"""Pretty prints the filter, such that it can be used in the calls to the LDAP library."""
if self.left is None:
# single value, self.root should be a string not representing an operator
return "(%s)" % (self.root)
left_string = self.left._to_string(self.root)
        if self.right is not None:
right_string = self.right._to_string(self.root)
else:
right_string = ""
if self.root == previous_operator:
return "%s%s" % (left_string, right_string)
else:
return "(%s%s%s)" % (self.root, left_string, right_string)
def _combine(self, operator, value=None):
"""Updates the tree with a new root, i.e., the given operator and
the value.
        The original tree becomes the left child tree, the value the right.
@type value: Either an LdapFilter instance or None (default)
@returns: the updated instance.
"""
new = copy.deepcopy(self)
old = copy.copy(new)
new.root = operator
new.left = old
new.right = value
return new
class TimestampFilter(LdapFilter):
"""Represents a filter that aims to find entries that are compared to a given timestamp."""
def __init__(self, value, timestamp, comparator):
"""Initialise the filter.
@type value: string representing a filter
@type timestamp: string or datetime instance representing a timestamp. This value
will be converted to a format LDAP groks.
@type comparator: string representing a comparison operation, e.g., <=, >=
"""
super(TimestampFilter, self).__init__(value)
self.timestamp = convert_timestamp(timestamp)[1]
if comparator != '>=' and comparator != '<=':
raise LdapFilterError()
self.comparator = comparator
def __str__(self):
"""Converts the filter to an LDAP understood string."""
return "(& (modifyTimestamp%s%s) %s)" % (self.comparator,
self.timestamp,
super(TimestampFilter, self).__str__())
class NewerThanFilter(TimestampFilter):
"""Represents a filter that aims to find entries that are newer than the given timestamp."""
def __init__(self, value, timestamp):
"""Initialise the filter.
@type value: string representing a filter
@type timestamp: string or datetime instance representing a timestamp. This value
will be converted to a format LDAP groks.
"""
super(NewerThanFilter, self).__init__(value, timestamp, '>=')
class OlderThanFilter(TimestampFilter):
"""Represents a filter that aims to find entries that are older than the given timestamp."""
def __init__(self, value, timestamp):
"""Initialise the filter.
@type value: string representing a filter
@type timestamp: string or datetime instance representing a timestamp. This value
will be converted to a format LDAP groks.
"""
super(OlderThanFilter, self).__init__(value, timestamp, '<=')
class CnFilter(LdapFilter):
"""Representa a filter that matches a given common name."""
def __init__(self, cn):
super(CnFilter, self).__init__("cn=%s" % (cn))
class MemberFilter(LdapFilter):
"""Represents a filter that looks if a member is listed in the memberUid."""
def __init__(self, user_id):
super(MemberFilter, self).__init__("memberUid=%s" % (user_id))
class LoginFilter(LdapFilter):
"""Represents a filter that looks up a user based on his institute login name."""
def __init__(self, login):
super(LoginFilter, self).__init__("login=%s" % (login))
class InstituteFilter(LdapFilter):
"""Represents a filter that looks up a user based on his institute login name."""
def __init__(self, institute):
super(InstituteFilter, self).__init__("institute=%s" % (institute))
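if __name__ == '__main__':
    # Minimal usage sketch (assumption: this demo block is not part of the
    # original module; the cn, institute and member values are arbitrary).
    import datetime
    # combine the convenience filters with the overloaded boolean operators
    print(CnFilter('gvo00002') & InstituteFilter('gent'))
    # restrict a membership filter to entries modified since a given date
    print(NewerThanFilter('memberUid=vsc40075', datetime.datetime(2021, 1, 1)))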
| gpl-2.0 |
drawks/ansible | lib/ansible/modules/storage/netapp/netapp_e_iscsi_target.py | 13 | 10627 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_iscsi_target
short_description: NetApp E-Series manage iSCSI target configuration
description:
- Configure the settings of an E-Series iSCSI target
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- The name/alias to assign to the iSCSI target.
- This alias is often used by the initiator software in order to make an iSCSI target easier to identify.
aliases:
- alias
ping:
description:
- Enable ICMP ping responses from the configured iSCSI ports.
type: bool
default: yes
chap_secret:
description:
- Enable Challenge-Handshake Authentication Protocol (CHAP), utilizing this value as the password.
- When this value is specified, we will always trigger an update (changed=True). We have no way of verifying
whether or not the password has changed.
- The chap secret may only use ascii characters with values between 32 and 126 decimal.
- The chap secret must be no less than 12 characters, but no more than 16 characters in length.
aliases:
- chap
- password
unnamed_discovery:
description:
- When an initiator initiates a discovery session to an initiator port, it is considered an unnamed
discovery session if the iSCSI target iqn is not specified in the request.
- This option may be disabled to increase security if desired.
type: bool
default: yes
log_path:
description:
- A local path (on the Ansible controller), to a file to be used for debug logging.
required: no
notes:
- Check mode is supported.
- Some of the settings are dependent on the settings applied to the iSCSI interfaces. These can be configured using
M(netapp_e_iscsi_interface).
- This module requires a Web Services API version of >= 1.3.
"""
EXAMPLES = """
- name: Enable ping responses and unnamed discovery sessions for all iSCSI ports
netapp_e_iscsi_target:
api_url: "https://localhost:8443/devmgr/v2"
api_username: admin
api_password: myPassword
ssid: "1"
validate_certs: no
name: myTarget
ping: yes
unnamed_discovery: yes
- name: Set the target alias and the CHAP secret
netapp_e_iscsi_target:
ssid: "{{ ssid }}"
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
name: myTarget
chap: password1234
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The iSCSI target settings have been updated.
alias:
description:
- The alias assigned to the iSCSI target.
returned: on success
sample: myArray
type: str
iqn:
description:
- The iqn (iSCSI Qualified Name), assigned to the iSCSI target.
returned: on success
sample: iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45
type: str
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class IscsiTarget(object):
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=False, aliases=['alias']),
ping=dict(type='bool', required=False, default=True),
chap_secret=dict(type='str', required=False, aliases=['chap', 'password'], no_log=True),
unnamed_discovery=dict(type='bool', required=False, default=True),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.name = args['name']
self.ping = args['ping']
self.chap_secret = args['chap_secret']
self.unnamed_discovery = args['unnamed_discovery']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
self.post_body = dict()
self.controllers = list()
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.chap_secret is not None:
if len(self.chap_secret) < 12 or len(self.chap_secret) > 16:
self.module.fail_json(msg="The provided CHAP secret is not valid, it must be between 12 and 16"
" characters in length.")
for c in self.chap_secret:
ordinal = ord(c)
if ordinal < 32 or ordinal > 126:
self.module.fail_json(msg="The provided CHAP secret is not valid, it may only utilize ascii"
" characters with decimal values between 32 and 126.")
@property
def target(self):
"""Provide information on the iSCSI Target configuration
Sample:
{
'alias': 'myCustomName',
'ping': True,
'unnamed_discovery': True,
'chap': False,
'iqn': 'iqn.1992-08.com.netapp:2800.000a132000b006d2000000005a0e8f45',
}
"""
target = dict()
try:
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/storagePoolBundle/target'
% self.ssid, headers=HEADERS, **self.creds)
# This likely isn't an iSCSI-enabled system
if not data:
self.module.fail_json(
msg="This storage-system doesn't appear to have iSCSI interfaces. Array Id [%s]." % (self.ssid))
data = data[0]
chap = any(
[auth for auth in data['configuredAuthMethods']['authMethodData'] if auth['authMethod'] == 'chap'])
target.update(dict(alias=data['alias']['iscsiAlias'],
iqn=data['nodeName']['iscsiNodeName'],
chap=chap))
(rc, data) = request(self.url + 'storage-systems/%s/graph/xpath-filter?query=/sa/iscsiEntityData'
% self.ssid, headers=HEADERS, **self.creds)
data = data[0]
target.update(dict(ping=data['icmpPingResponseEnabled'],
unnamed_discovery=data['unnamedDiscoverySessionsEnabled']))
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve the iSCSI target information. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return target
def apply_iscsi_settings(self):
"""Update the iSCSI target alias and CHAP settings"""
update = False
target = self.target
body = dict()
if self.name is not None and self.name != target['alias']:
update = True
body['alias'] = self.name
# If the CHAP secret was provided, we trigger an update.
if self.chap_secret is not None:
update = True
body.update(dict(enableChapAuthentication=True,
chapSecret=self.chap_secret))
# If no secret was provided, then we disable chap
elif target['chap']:
update = True
body.update(dict(enableChapAuthentication=False))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/target-settings' % self.ssid, method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def apply_target_changes(self):
update = False
target = self.target
body = dict()
if self.ping != target['ping']:
update = True
body['icmpPingResponseEnabled'] = self.ping
if self.unnamed_discovery != target['unnamed_discovery']:
update = True
body['unnamedDiscoverySessionsEnabled'] = self.unnamed_discovery
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
request(self.url + 'storage-systems/%s/iscsi/entity' % self.ssid, method='POST',
data=json.dumps(body), timeout=60, headers=HEADERS, **self.creds)
except Exception as err:
self.module.fail_json(
msg="Failed to update the iSCSI target settings. Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.apply_iscsi_settings()
update = self.apply_target_changes() or update
target = self.target
data = dict((key, target[key]) for key in target if key in ['iqn', 'alias'])
        self.module.exit_json(msg="The iSCSI target settings have been updated.", changed=update, **data)
def __call__(self, *args, **kwargs):
self.update()
def main():
iface = IscsiTarget()
iface()
if __name__ == '__main__':
main()
| gpl-3.0 |
PaulWay/spacewalk | backend/satellite_tools/xmlDiskSource.py | 2 | 9003 | #
# Abstraction for an XML importer with a disk base
#
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import gzip
from spacewalk.common.fileutils import createPath
from spacewalk.common.rhnLib import hash_object_id
class MissingXmlDiskSourceFileError(Exception):
pass
class MissingXmlDiskSourceDirError(Exception):
pass
class DiskSource:
subdir = None
# Allow for compressed files by default
allow_compressed_files = 1
def __init__(self, mountPoint):
self.mountPoint = mountPoint
# Returns a data stream
def load(self):
# Returns a stream
filename = self._getFile()
return self._loadFile(filename)
def _getFile(self, create=0):
# Virtual
# pylint: disable=W0613,R0201
return None
def _loadFile(self, filename):
# Look for a gzip file first
if self.allow_compressed_files:
if filename[-3:] == '.gz' and os.path.exists(filename):
return gzip.open(filename, "rb")
if os.path.exists(filename + '.gz'):
return gzip.open(filename + ".gz", "rb")
if os.path.exists(filename):
return open(filename, "r")
raise MissingXmlDiskSourceFileError("unable to process file %s" % filename)
def _getDir(self, create=0):
dirname = "%s/%s" % (self.mountPoint, self.subdir)
if not create:
return dirname
if not os.path.exists(dirname):
createPath(dirname)
if not os.path.isdir(dirname):
raise MissingXmlDiskSourceDirError("%s is not a directory" % dirname)
return dirname
class ArchesDiskSource(DiskSource):
subdir = 'arches'
filename = 'arches.xml'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return os.path.join(dirname, self.filename)
class ArchesExtraDiskSource(ArchesDiskSource):
filename = "arches-extra.xml"
class ProductnamesDiskSource(DiskSource):
subdir = 'product_names'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/product_names.xml" % dirname
class ChannelFamilyDiskSource(DiskSource):
subdir = 'channel_families'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/channel_families.xml" % dirname
class OrgsDiskSource(DiskSource):
subdir = 'orgs'
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/orgs.xml" % dirname
class ChannelDiskSource(DiskSource):
subdir = 'channels'
def __init__(self, mountPoint):
DiskSource.__init__(self, mountPoint)
self.channel = None
def setChannel(self, channel):
self.channel = channel
def list(self):
# Lists the available channels
dirname = self._getDir(create=0)
if not os.path.isdir(dirname):
# No channels available
return []
return os.listdir(dirname)
def _getFile(self, create=0):
dirname = "%s/%s" % (self._getDir(create), self.channel)
if create and not os.path.isdir(dirname):
createPath(dirname)
return os.path.join(dirname, self._file_name())
@staticmethod
def _file_name():
return "channel.xml"
class ChannelCompsDiskSource(ChannelDiskSource):
@staticmethod
def _file_name():
return "comps.xml"
class ShortPackageDiskSource(DiskSource):
subdir = "packages_short"
def __init__(self, mountPoint):
DiskSource.__init__(self, mountPoint)
# Package ID
self.id = None
self._file_suffix = ".xml"
def setID(self, pid):
self.id = pid
# limited dict behaviour
def has_key(self, pid):
# Save the old id
old_id = self.id
self.id = pid
f = self._getFile()
# Restore the old id
self.id = old_id
if os.path.exists(f + '.gz') or os.path.exists(f):
return 1
return 0
def _getFile(self, create=0):
dirname = "%s/%s" % (self._getDir(create), self._hashID())
        # Create the directory if we have to
if create and not os.path.exists(dirname):
createPath(dirname)
return "%s/%s%s" % (dirname, self.id, self._file_suffix)
def _hashID(self):
# Hashes the package name
return hash_object_id(self.id, 2)
class PackageDiskSource(ShortPackageDiskSource):
subdir = "packages"
class SourcePackageDiskSource(ShortPackageDiskSource):
subdir = "source_packages"
class ErrataDiskSource(ShortPackageDiskSource):
subdir = "errata"
def _hashID(self):
# Hashes the erratum name
return hash_object_id(self.id, 1)
class BlacklistsDiskSource(DiskSource):
subdir = "blacklists"
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return "%s/blacklists.xml" % dirname
class BinaryRPMDiskSource(ShortPackageDiskSource):
subdir = "rpms"
def __init__(self, mountPoint):
ShortPackageDiskSource.__init__(self, mountPoint)
self._file_suffix = '.rpm'
class SourceRPMDiskSource(BinaryRPMDiskSource):
subdir = "srpms"
class KickstartDataDiskSource(DiskSource):
subdir = "kickstart_trees"
def __init__(self, mountPoint):
DiskSource.__init__(self, mountPoint)
self.id = None
def setID(self, ks_label):
self.id = ks_label
def _getFile(self, create=0):
dirname = self._getDir(create)
if create and not os.path.isdir(dirname):
createPath(dirname)
return os.path.join(dirname, self.id) + '.xml'
class KickstartFileDiskSource(KickstartDataDiskSource):
subdir = "kickstart_files"
allow_compressed_files = 0
def __init__(self, mountPoint):
KickstartDataDiskSource.__init__(self, mountPoint)
# the file's relative path
self.relative_path = None
def set_relative_path(self, relative_path):
self.relative_path = relative_path
def _getFile(self, create=0):
path = os.path.join(self._getDir(create), self.id,
self.relative_path)
dirname = os.path.dirname(path)
if create and not os.path.isdir(dirname):
createPath(dirname)
return path
class MetadataDiskSource:
def __init__(self, mountpoint):
self.mountpoint = mountpoint
@staticmethod
def is_disk_loader():
return True
def getArchesXmlStream(self):
return ArchesDiskSource(self.mountpoint).load()
def getArchesExtraXmlStream(self):
return ArchesExtraDiskSource(self.mountpoint).load()
def getChannelFamilyXmlStream(self):
return ChannelFamilyDiskSource(self.mountpoint).load()
def getOrgsXmlStream(self):
return OrgsDiskSource(self.mountpoint).load()
def getProductNamesXmlStream(self):
return ProductnamesDiskSource(self.mountpoint).load()
def getComps(self, label):
sourcer = ChannelCompsDiskSource(self.mountpoint)
sourcer.setChannel(label)
return sourcer.load()
def getChannelXmlStream(self):
sourcer = ChannelDiskSource(self.mountpoint)
channels = sourcer.list()
stream_list = []
for c in channels:
sourcer.setChannel(c)
stream_list.append(sourcer.load())
return stream_list
def getChannelShortPackagesXmlStream(self):
return ShortPackageDiskSource(self.mountpoint)
def getPackageXmlStream(self):
return PackageDiskSource(self.mountpoint)
def getSourcePackageXmlStream(self):
return SourcePackageDiskSource(self.mountpoint)
def getKickstartsXmlStream(self):
return KickstartDataDiskSource(self.mountpoint)
def getErrataXmlStream(self):
return ErrataDiskSource(self.mountpoint)
if __name__ == '__main__':
# TEST CODE
s = ChannelDiskSource("/tmp")
print s.list()
s.setChannel("redhat-linux-i386-7.2")
print s.load()
| gpl-2.0 |
Onager/plaso | tests/parsers/esedb_plugins/msie_webcache.py | 1 | 3763 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
"""Tests for the MSIE WebCache ESE database plugin."""
# pylint: disable=protected-access
def testConvertHeadersValues(self):
"""Tests the _ConvertHeadersValues function."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = (
b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
b'X-XSS-Protection: 1; mode=block\r\n'
b'Alternate-Protocol: 80:quic\r\n\r\n')
expected_headers_value = (
'[HTTP/1.1 200 OK; Content-Type: image/png; '
'X-Content-Type-Options: nosniff; Content-Length: 2759; '
'X-XSS-Protection: 1; mode=block; '
'Alternate-Protocol: 80:quic]')
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value)
def testProcessOnDatabaseWithPartitionsTable(self):
"""Tests the Process function on database with a Partitions table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 1354)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'container_identifier': 1,
'data_type': 'msie:webcache:containers',
'directory': (
'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
'INetCache\\IE\\'),
'name': 'Content',
'set_identifier': 0,
'timestamp': '2014-05-12 07:30:25.486199',
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[567], expected_event_values)
def testProcessOnDatabaseWithPartitionsExTable(self):
"""Tests the Process function on database with a PartitionsEx table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(
['PartitionsEx-WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 3)
self.assertEqual(storage_writer.number_of_events, 4014)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'access_count': 5,
'cache_identifier': 0,
'cached_file_size': 726,
'cached_filename': 'b83d57c0[1].svg',
'container_identifier': 14,
'data_type': 'msie:webcache:container',
'entry_identifier': 63,
'sync_count': 0,
'response_headers': (
'[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
'Mon, 16 Dec 2019 20:55:28 GMT]'),
'timestamp': '2019-03-20 17:22:14.000000',
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
self.CheckEventValues(storage_writer, events[100], expected_event_values)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jeffreylu9/django-cms | cms/test_utils/project/sampleapp/migrations/0002_auto_20141015_1057.py | 60 | 1264 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sampleapp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='level',
),
migrations.RemoveField(
model_name='category',
name='lft',
),
migrations.RemoveField(
model_name='category',
name='rght',
),
migrations.RemoveField(
model_name='category',
name='tree_id',
),
migrations.AddField(
model_name='category',
name='depth',
field=models.PositiveIntegerField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='category',
name='numchild',
field=models.PositiveIntegerField(default=0),
preserve_default=True,
),
migrations.AddField(
model_name='category',
name='path',
field=models.CharField(default='0001', unique=True, max_length=255),
preserve_default=False,
),
]
| bsd-3-clause |
vrsys/avango | examples/stereo_example/main.py | 1 | 3087 | import avango
import avango.script
from avango.script import field_has_changed
import avango.gua
from examples_common.GuaVE import GuaVE
STEREO_MODE = avango.gua.StereoMode.ANAGLYPH_RED_CYAN
# STEREO_MODE = avango.gua.StereoMode.ANAGLYPH_RED_GREEN
# STEREO_MODE = avango.gua.StereoMode.SIDE_BY_SIDE
# STEREO_MODE = avango.gua.StereoMode.CHECKERBOARD
# STEREO_MODE = avango.gua.StereoMode.NVIDIA_3D_VISION
# STEREO_MODE = avango.gua.StereoMode.QUAD_BUFFERED
class TimedRotate(avango.script.Script):
TimeIn = avango.SFFloat()
MatrixOut = avango.gua.SFMatrix4()
@field_has_changed(TimeIn)
def update(self):
self.MatrixOut.value = avango.gua.make_rot_mat(self.TimeIn.value*2.0, 0.0, 1.0, 0.0)
def start():
# setup scenegraph
graph = avango.gua.nodes.SceneGraph(Name = "scenegraph")
loader = avango.gua.nodes.TriMeshLoader()
monkey = loader.create_geometry_from_file("monkey", "data/objects/monkey.obj", avango.gua.LoaderFlags.DEFAULTS)
light = avango.gua.nodes.LightNode(
Type=avango.gua.LightType.POINT,
Name = "light",
Color = avango.gua.Color(1.0, 1.0, 1.0),
Brightness = 10
)
light.Transform.value = avango.gua.make_trans_mat(1, 1, 2) * avango.gua.make_scale_mat(15, 15, 15)
# setup viewing
width = 1024
height = 768
eye_size = avango.gua.Vec2ui(width, height)
window_size = avango.gua.Vec2ui(width, height)
left_pos = avango.gua.Vec2ui(0, 0)
right_pos = avango.gua.Vec2ui(0, 0)
if STEREO_MODE == avango.gua.StereoMode.SIDE_BY_SIDE:
right_pos.x = width + 1
window_size.x *= 2
#window = avango.gua.nodes.GlfwWindow(Size = window_size,
window = avango.gua.nodes.Window(Size = window_size,
LeftPosition = left_pos,
LeftResolution = eye_size,
RightPosition = right_pos,
RightResolution = eye_size,
StereoMode = STEREO_MODE)
avango.gua.register_window("window", window)
cam = avango.gua.nodes.CameraNode(
Name = "cam",
LeftScreenPath = "/screen",
RightScreenPath = "/screen",
SceneGraph = "scenegraph",
Resolution = eye_size,
EyeDistance = 0.06,
EnableStereo = True,
OutputWindowName = "window",
Transform = avango.gua.make_trans_mat(0.0, 0.0, 0.5)
# NearClip =
)
screen = avango.gua.nodes.ScreenNode(Name = "screen", Width = 0.5, Height = 0.5 * 0.3 / 0.4)
screen.Transform.value = avango.gua.make_trans_mat(0.0, 0.0, 2.5)
screen.Children.value = [cam]
graph.Root.value.Children.value = [monkey, light, screen]
#setup viewer
viewer = avango.gua.nodes.Viewer()
viewer.SceneGraphs.value = [graph]
viewer.Windows.value = [window]
viewer.DesiredFPS.value = 500.0
monkey_updater = TimedRotate()
timer = avango.nodes.TimeSensor()
monkey_updater.TimeIn.connect_from(timer.Time)
monkey.Transform.connect_from(monkey_updater.MatrixOut)
guaVE = GuaVE()
guaVE.start(locals(), globals())
viewer.run()
if __name__ == '__main__':
start()
| lgpl-3.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.4/django/contrib/gis/db/backends/oracle/operations.py | 52 | 12903 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from decimal import Decimal
from itertools import izip
from django.db.backends.oracle.base import DatabaseOperations
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.util import SpatialFunction
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
class SDOOperation(SpatialFunction):
"Base class for SDO* Oracle operations."
sql_template = "%(function)s(%(geo_col)s, %(geometry)s) %(operator)s '%(result)s'"
def __init__(self, func, **kwargs):
kwargs.setdefault('operator', '=')
kwargs.setdefault('result', 'TRUE')
super(SDOOperation, self).__init__(func, **kwargs)
class SDODistance(SpatialFunction):
"Class for Distance queries."
sql_template = ('%(function)s(%(geo_col)s, %(geometry)s, %(tolerance)s) '
'%(operator)s %(result)s')
dist_func = 'SDO_GEOM.SDO_DISTANCE'
def __init__(self, op, tolerance=0.05):
super(SDODistance, self).__init__(self.dist_func,
tolerance=tolerance,
operator=op, result='%s')
class SDODWithin(SpatialFunction):
dwithin_func = 'SDO_WITHIN_DISTANCE'
sql_template = "%(function)s(%(geo_col)s, %(geometry)s, %%s) = 'TRUE'"
def __init__(self):
super(SDODWithin, self).__init__(self.dwithin_func)
class SDOGeomRelate(SpatialFunction):
"Class for using SDO_GEOM.RELATE."
relate_func = 'SDO_GEOM.RELATE'
sql_template = ("%(function)s(%(geo_col)s, '%(mask)s', %(geometry)s, "
"%(tolerance)s) %(operator)s '%(mask)s'")
def __init__(self, mask, tolerance=0.05):
# SDO_GEOM.RELATE(...) has a peculiar argument order: column, mask, geom, tolerance.
# Moreover, the function result is the mask (e.g., 'DISJOINT' instead of 'TRUE').
super(SDOGeomRelate, self).__init__(self.relate_func, operator='=',
mask=mask, tolerance=tolerance)
class SDORelate(SpatialFunction):
"Class for using SDO_RELATE."
masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
sql_template = "%(function)s(%(geo_col)s, %(geometry)s, 'mask=%(mask)s') = 'TRUE'"
relate_func = 'SDO_RELATE'
def __init__(self, mask):
if not self.mask_regex.match(mask):
raise ValueError('Invalid %s mask: "%s"' % (self.relate_func, mask))
super(SDORelate, self).__init__(self.relate_func, mask=mask)
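# Example (added, hypothetical usage): a lookup such as
#   qs.filter(poly__relate=(geom, 'INSIDE+TOUCH'))
# hands 'INSIDE+TOUCH' to SDORelate; mask_regex accepts it because both parts are
# valid SDO_RELATE masks, and the rendered SQL is roughly
#   SDO_RELATE("app_model"."poly", SDO_GEOMETRY(%s, 4326), 'mask=INSIDE+TOUCH') = 'TRUE'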
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float, int, long)
class OracleOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = "django.contrib.gis.db.backends.oracle.compiler"
name = 'oracle'
oracle = True
valid_aggregates = dict([(a, None) for a in ('Union', 'Extent')])
Adapter = OracleSpatialAdapter
Adaptor = Adapter # Backwards-compatibility alias.
area = 'SDO_GEOM.SDO_AREA'
gml = 'SDO_UTIL.TO_GMLGEOMETRY'
centroid = 'SDO_GEOM.SDO_CENTROID'
difference = 'SDO_GEOM.SDO_DIFFERENCE'
distance = 'SDO_GEOM.SDO_DISTANCE'
extent = 'SDO_AGGR_MBR'
intersection = 'SDO_GEOM.SDO_INTERSECTION'
length = 'SDO_GEOM.SDO_LENGTH'
num_geom = 'SDO_UTIL.GETNUMELEM'
num_points = 'SDO_UTIL.GETNUMVERTICES'
perimeter = length
point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
reverse = 'SDO_UTIL.REVERSE_LINESTRING'
sym_difference = 'SDO_GEOM.SDO_XOR'
transform = 'SDO_CS.TRANSFORM'
union = 'SDO_GEOM.SDO_UNION'
unionagg = 'SDO_AGGR_UNION'
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
distance_functions = {
'distance_gt' : (SDODistance('>'), dtypes),
'distance_gte' : (SDODistance('>='), dtypes),
'distance_lt' : (SDODistance('<'), dtypes),
'distance_lte' : (SDODistance('<='), dtypes),
'dwithin' : (SDODWithin(), dtypes),
}
geometry_functions = {
'contains' : SDOOperation('SDO_CONTAINS'),
'coveredby' : SDOOperation('SDO_COVEREDBY'),
'covers' : SDOOperation('SDO_COVERS'),
'disjoint' : SDOGeomRelate('DISJOINT'),
'intersects' : SDOOperation('SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
'equals' : SDOOperation('SDO_EQUAL'),
'exact' : SDOOperation('SDO_EQUAL'),
'overlaps' : SDOOperation('SDO_OVERLAPS'),
'same_as' : SDOOperation('SDO_EQUAL'),
'relate' : (SDORelate, basestring), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
'touches' : SDOOperation('SDO_TOUCH'),
'within' : SDOOperation('SDO_INSIDE'),
}
geometry_functions.update(distance_functions)
gis_terms = ['isnull']
gis_terms += geometry_functions.keys()
gis_terms = dict([(term, None) for term in gis_terms])
truncate_params = {'relate' : None}
def convert_extent(self, clob):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = Geometry(clob.read())
gtype = str(ext_geom.geom_type)
if gtype == 'Polygon':
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == 'Point':
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def convert_geom(self, clob, geo_field):
if clob:
return Geometry(clob.read(), geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Returns the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return 'MDSYS.SDO_GEOMETRY'
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
# dwithin lookups on oracle require a special string parameter
# that starts with "distance=".
if lookup_type == 'dwithin':
dist_param = 'distance=%s' % dist_param
return [dist_param]
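# Worked example (added; assumes a geodetic geometry column): calling
# get_distance(f, [Distance(km=5)], 'distance_lte') returns [5000.0] because
# geodetic columns take metres, while the same value under a 'dwithin' lookup is
# wrapped as ['distance=5000.0'] for SDO_WITHIN_DISTANCE.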
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
SDO_CS.TRANSFORM() function call.
"""
if value is None:
return 'NULL'
def transform_value(val, srid):
return val.srid != srid
if hasattr(value, 'expression'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
return placeholder % '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
else:
if transform_value(value, f.srid):
return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
else:
return 'SDO_GEOMETRY(%%s, %s)' % f.srid
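# Worked example (added): for a field with srid 4326 and a geometry value whose
# srid is 3857, the placeholder becomes
#   SDO_CS.TRANSFORM(SDO_GEOMETRY(%s, 3857), 4326)
# whereas a value already in srid 4326 yields just SDO_GEOMETRY(%s, 4326).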
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"Returns the SQL WHERE clause for use in Oracle spatial SQL construction."
alias, col, db_type = lvalue
# Getting the quoted table name as `geo_col`.
geo_col = '%s.%s' % (qn(alias), qn(col))
# See if an Oracle Geometry function matches the lookup type next
lookup_info = self.geometry_functions.get(lookup_type, False)
if lookup_info:
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# 'dwithin' lookup types.
if isinstance(lookup_info, tuple):
# First element of the tuple is the SDO operation, second element is the type
# of the expected argument (e.g., str, float).
sdo_op, arg_type = lookup_info
geom = value[0]
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, tuple):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
if len(value) != 2:
raise ValueError('2-element tuple required for %s lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
if lookup_type == 'relate':
# The SDORelate class handles construction for these queries,
# and verifies the mask argument.
return sdo_op(value[1]).as_sql(geo_col, self.get_geom_placeholder(field, geom))
else:
# Otherwise, just call the `as_sql` method on the SDOOperation instance.
return sdo_op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
else:
# Lookup info is a SDOOperation instance, whose `as_sql` method returns
# the SQL necessary for the geometry function call. For example:
# SDO_CONTAINS("geoapp_country"."poly", SDO_GEOMTRY('POINT(5 23)', 4326)) = 'TRUE'
return lookup_info.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__.lower()
if agg_name == 'union' : agg_name += 'agg'
if agg.is_extent:
sql_template = '%(function)s(%(field)s)'
else:
sql_template = '%(function)s(SDOAGGRTYPE(%(field)s,%(tolerance)s))'
sql_function = getattr(self, agg_name)
return self.select % sql_template, sql_function
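# Worked example (added): for an Extent aggregate, agg_name is 'extent', so the
# method returns ('SDO_UTIL.TO_WKTGEOMETRY(%(function)s(%(field)s))', 'SDO_AGGR_MBR');
# self.select wraps the aggregate template so the MBR comes back as WKT.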
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import SpatialRefSys
return SpatialRefSys
def modify_insert_params(self, placeholders, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
backend due to #10888
"""
# This code doesn't work for bulk insert cases.
assert len(placeholders) == 1
return [[param for pholder,param
in izip(placeholders[0], params[0]) if pholder != 'NULL'], ]
| apache-2.0 |
gromacs/copernicus | cpc/server/message/state.py | 2 | 4443 | # This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from cpc.util.conf.server_conf import ServerConf, ServerIdNotFoundException
from cpc.util.version import __version__
from server_command import ServerCommand
from server_command import ServerCommandError
from cpc.server.state.user_handler import UserLevel, UserHandler, UserError
from cpc.dataflow.lib import getModulesList
log=logging.getLogger(__name__)
class SCStop(ServerCommand):
"""Stop server command"""
def __init__(self):
ServerCommand.__init__(self, "stop")
def run(self, serverState, request, response):
log.info("Stop request received")
serverState.doQuit()
response.add('Quitting.')
class SCSaveState(ServerCommand):
"""Save the server state"""
def __init__(self):
ServerCommand.__init__(self, "save-state")
def run(self, serverState, request, response):
serverState.write()
response.add('Saved state.')
log.info("Save-state request received")
class SCPingServer(ServerCommand):
"""Test server command"""
def __init__(self):
ServerCommand.__init__(self, "ping")
def run(self, serverState, request, response):
response.add("OK")
class SCServerInfo(ServerCommand):
def __init__(self):
ServerCommand.__init__(self, "server-info")
def run(self, serverState, request, response):
conf = ServerConf()
info = dict()
info['fqdn'] = conf.getFqdn()
info['version'] = __version__
try:
conf.getServerId()
info['serverId'] = conf.getServerId()
info['server_secure_port'] = conf.getServerSecurePort()
info['client_secure_port'] = conf.getClientSecurePort()
except ServerIdNotFoundException as e:
info['serverId'] = "ERROR: %s"%e.str
response.add("",info)
class SCListServerItems(ServerCommand):
"""queue/running/heartbeat list command """
def __init__(self):
ServerCommand.__init__(self, "list")
def run(self, serverState, request, response):
toList = request.getParam('type')
retstr = ""
if toList == "queue":
list = serverState.getCmdQueue().list()
queue = []
for cmd in list:
queue.append(cmd.toJSON())
running = []
cmds = serverState.getRunningCmdList().getCmdList()
for cmd in cmds:
running.append(cmd.toJSON())
retstr = {"queue": queue, "running": running}
elif toList == "running":
running = []
cmds = serverState.getRunningCmdList().getCmdList()
for cmd in cmds:
running.append(cmd.toJSON())
retstr = running
elif toList == "heartbeats":
heartbeats = serverState.getRunningCmdList().toJSON() #.list()
retstr = heartbeats
elif toList == "users":
retstr = UserHandler().getUsersAsList()
elif toList == "modules":
retstr = getModulesList()
else:
raise ServerCommandError("Unknown item to list: '%s'" % toList)
response.add(retstr)
log.info("Listed %s" % toList)
class SCReadConf(ServerCommand):
"""Update the configuration based on new settings."""
def __init__(self):
ServerCommand.__init__(self, "readconf")
def run(self, serverState, request, response):
conf = ServerConf()
conf.reread()
response.add("Reread configuration.")
log.info("Reread configuration done")
| gpl-2.0 |
elancom/storm | storm-core/src/dev/resources/tester_bolt.py | 16 | 1272 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Python file uses the following encoding: utf-8
import storm
from random import random
class TesterBolt(storm.Bolt):
def initialize(self, conf, context):
storm.emit(['bolt initializing'])
def process(self, tup):
word = tup.values[0];
if (random() < 0.75):
storm.emit([word + 'lalala'], anchors=[tup])
storm.ack(tup)
else:
storm.log(word + ' randomly skipped!')
TesterBolt().run()
| apache-2.0 |
dneg/gaffer | python/GafferImageTest/SelectTest.py | 5 | 3240 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os
import IECore
import GafferImage
class SelectTest( unittest.TestCase ) :
rPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/redWithDataWindow.100x100.exr" )
gPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/greenWithDataWindow.100x100.exr" )
bPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/blueWithDataWindow.100x100.exr" )
# Do several tests to check the cache is working correctly:
def testHashPassThrough( self ) :
r1 = GafferImage.ImageReader()
r1["fileName"].setValue( self.rPath )
r2 = GafferImage.ImageReader()
r2["fileName"].setValue( self.gPath )
r3 = GafferImage.ImageReader()
r3["fileName"].setValue( self.bPath )
##########################################
# Test to see if the hash changes when we set the select plug.
##########################################
s = GafferImage.Select()
s["select"].setValue(1)
s["in"].setInput(r1["out"])
s["in1"].setInput(r2["out"])
s["in2"].setInput(r3["out"])
h1 = s["out"].image().hash()
h2 = r2["out"].image().hash()
self.assertEqual( h1, h2 )
s["select"].setValue(0)
h1 = s["out"].image().hash()
h2 = r1["out"].image().hash()
self.assertEqual( h1, h2 )
s["select"].setValue(2)
h1 = s["out"].image().hash()
h2 = r3["out"].image().hash()
self.assertEqual( h1, h2 )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
kphillisjr/burg | util/import_gcry.py | 6 | 17863 | #*
#* GRUB -- GRand Unified Bootloader
#* Copyright (C) 2009 Free Software Foundation, Inc.
#*
#* GRUB is free software: you can redistribute it and/or modify
#* it under the terms of the GNU General Public License as published by
#* the Free Software Foundation, either version 3 of the License, or
#* (at your option) any later version.
#*
#* GRUB is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#* GNU General Public License for more details.
#*
#* You should have received a copy of the GNU General Public License
#* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
#*
import re
import sys
import os
import datetime
import codecs
if len (sys.argv) < 3:
print ("Usage: %s SOURCE DESTINATION" % sys.argv[0])
exit (0)
indir = sys.argv[1]
outdir = sys.argv[2]
basedir = os.path.join (outdir, "lib/libgcrypt-grub")
try:
os.makedirs (basedir)
except:
print ("WARNING: %s already exists" % basedir)
cipher_dir_in = os.path.join (indir, "cipher")
cipher_dir_out = os.path.join (basedir, "cipher")
try:
os.makedirs (cipher_dir_out)
except:
print ("WARNING: %s already exists" % cipher_dir_out)
cipher_files = os.listdir (cipher_dir_in)
conf = codecs.open (os.path.join ("grub-core", "Makefile.gcry.def"), "w", "utf-8")
conf.write ("AutoGen definitions Makefile.tpl;\n\n")
confutil = codecs.open ("Makefile.utilgcry.def", "w", "utf-8")
confutil.write ("AutoGen definitions Makefile.tpl;\n\n")
confutil.write ("library = {\n");
confutil.write (" name = libgrubgcry.a;\n");
confutil.write (" cflags = '$(CFLAGS_GCRY)';\n");
confutil.write (" cppflags = '$(CPPFLAGS_GCRY)';\n");
confutil.write (" extra_dist = grub-core/lib/libgcrypt-grub/cipher/ChangeLog;\n");
confutil.write ("\n");
chlog = ""
modules = []
# Strictly speaking CRC32/CRC24 work on bytes, so this value should be 1,
# but libgcrypt uses 64. Let's keep the value for compatibility. Since
# no one uses CRC24/CRC32 for HMAC, this is no problem.
mdblocksizes = {"_gcry_digest_spec_crc32" : 64,
"_gcry_digest_spec_crc32_rfc1510" : 64,
"_gcry_digest_spec_crc24_rfc2440" : 64,
"_gcry_digest_spec_md4" : 64,
"_gcry_digest_spec_md5" : 64,
"_gcry_digest_spec_rmd160" : 64,
"_gcry_digest_spec_sha1" : 64,
"_gcry_digest_spec_sha224" : 64,
"_gcry_digest_spec_sha256" : 64,
"_gcry_digest_spec_sha384" : 128,
"_gcry_digest_spec_sha512" : 128,
"_gcry_digest_spec_tiger" : 64,
"_gcry_digest_spec_whirlpool" : 64}
cryptolist = codecs.open (os.path.join (cipher_dir_out, "crypto.lst"), "w", "utf-8")
# rijndael is the only cipher using aliases. So no need for mangling, just
# hardcode it
cryptolist.write ("RIJNDAEL: gcry_rijndael\n");
cryptolist.write ("RIJNDAEL192: gcry_rijndael\n");
cryptolist.write ("RIJNDAEL256: gcry_rijndael\n");
cryptolist.write ("AES128: gcry_rijndael\n");
cryptolist.write ("AES-128: gcry_rijndael\n");
cryptolist.write ("AES-192: gcry_rijndael\n");
cryptolist.write ("AES-256: gcry_rijndael\n");
cryptolist.write ("ADLER32: adler32\n");
cryptolist.write ("CRC64: crc64\n");
for cipher_file in cipher_files:
infile = os.path.join (cipher_dir_in, cipher_file)
outfile = os.path.join (cipher_dir_out, cipher_file)
if cipher_file == "ChangeLog":
continue
chlognew = " * %s" % cipher_file
if re.match ("(Manifest|Makefile\.am|ac\.c|cipher\.c|hash-common\.c|hmac-tests\.c|md\.c|pubkey\.c)$", cipher_file):
chlog = "%s%s: Removed\n" % (chlog, chlognew)
continue
# Autogenerated files. Not even worth mentioning in ChangeLog
if re.match ("Makefile\.in$", cipher_file):
continue
nch = False
if re.match (".*\.[ch]$", cipher_file):
isc = re.match (".*\.c$", cipher_file)
f = codecs.open (infile, "r", "utf-8")
fw = codecs.open (outfile, "w", "utf-8")
fw.write ("/* This file was automatically imported with \n")
fw.write (" import_gcry.py. Please don't modify it */\n")
fw.write ("#include <grub/dl.h>\n")
if cipher_file == "camellia.h":
fw.write ("#include <grub/misc.h>\n")
fw.write ("void camellia_setup128(const unsigned char *key, grub_uint32_t *subkey);\n")
fw.write ("void camellia_setup192(const unsigned char *key, grub_uint32_t *subkey);\n")
fw.write ("void camellia_setup256(const unsigned char *key, grub_uint32_t *subkey);\n")
fw.write ("void camellia_encrypt128(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_encrypt192(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_encrypt256(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_decrypt128(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_decrypt192(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("void camellia_decrypt256(const grub_uint32_t *subkey, grub_uint32_t *io);\n")
fw.write ("#define memcpy grub_memcpy\n")
# Whole libgcrypt is distributed under GPLv3+ or compatible
if isc:
fw.write ("GRUB_MOD_LICENSE (\"GPLv3+\");\n")
ciphernames = []
mdnames = []
hold = False
skip = False
skip2 = False
ismd = False
iscipher = False
iscryptostart = False
iscomma = False
isglue = False
skip_statement = False
if isc:
modname = cipher_file [0:len(cipher_file) - 2]
if re.match (".*-glue$", modname):
modname = modname.replace ("-glue", "")
isglue = True
modname = "gcry_%s" % modname
for line in f:
line = line
if skip_statement:
if not re.search (";", line) is None:
skip_statement = False
continue
if skip:
if line[0] == "}":
skip = False
continue
if skip2:
if not re.search (" *};", line) is None:
skip2 = False
continue
if iscryptostart:
s = re.search (" *\"([A-Z0-9_a-z]*)\"", line)
if not s is None:
sg = s.groups()[0]
cryptolist.write (("%s: %s\n") % (sg, modname))
iscryptostart = False
if ismd or iscipher:
if not re.search (" *};", line) is None:
if not iscomma:
fw.write (" ,\n")
fw.write ("#ifdef GRUB_UTIL\n");
fw.write (" .modname = \"%s\",\n" % modname);
fw.write ("#endif\n");
if ismd:
if not (mdname in mdblocksizes):
print ("ERROR: Unknown digest blocksize: %s\n"
% mdname)
exit (1)
fw.write (" .blocksize = %s\n"
% mdblocksizes [mdname])
ismd = False
iscipher = False
iscomma = not re.search (",$", line) is None
# Used only for selftests.
m = re.match ("(static byte|static unsigned char) (weak_keys_chksum)\[[0-9]*\] =", line)
if not m is None:
skip = True
fname = m.groups ()[1]
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
if hold:
hold = False
# We're optimising for size.
if not re.match ("(run_selftests|selftest|_gcry_aes_c.._..c|_gcry_[a-z0-9]*_hash_buffer|tripledes_set2keys|do_tripledes_set_extra_info|_gcry_rmd160_mixblock|serpent_test)", line) is None:
skip = True
if not re.match ("serpent_test", line) is None:
fw.write ("static const char *serpent_test (void) { return 0; }\n");
fname = re.match ("[a-zA-Z0-9_]*", line).group ()
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
else:
fw.write (holdline)
m = re.match ("# *include <(.*)>", line)
if not m is None:
chmsg = "Removed including of %s" % m.groups ()[0]
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s: %s" % (chlognew, chmsg)
nch = True
continue
m = re.match ("gcry_cipher_spec_t", line)
if isc and not m is None:
assert (not iscryptostart)
assert (not iscipher)
assert (not iscryptostart)
ciphername = line [len ("gcry_cipher_spec_t"):].strip ()
ciphername = re.match("[a-zA-Z0-9_]*",ciphername).group ()
ciphernames.append (ciphername)
iscipher = True
iscryptostart = True
m = re.match ("gcry_md_spec_t", line)
if isc and not m is None:
assert (not ismd)
assert (not iscipher)
assert (not iscryptostart)
mdname = line [len ("gcry_md_spec_t"):].strip ()
mdname = re.match("[a-zA-Z0-9_]*",mdname).group ()
mdnames.append (mdname)
ismd = True
iscryptostart = True
m = re.match ("static const char \*selftest.*;$", line)
if not m is None:
fname = line[len ("static const char \*"):]
fname = re.match ("[a-zA-Z0-9_]*", fname).group ()
chmsg = "(%s): Removed declaration." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
m = re.match ("(static const char( |)\*|static gpg_err_code_t|void|static int|static gcry_err_code_t)$", line)
if not m is None:
hold = True
holdline = line
continue
m = re.match ("static int tripledes_set2keys \(.*\);", line)
if not m is None:
continue
m = re.match ("static int tripledes_set2keys \(", line)
if not m is None:
skip_statement = True
continue
m = re.match ("cipher_extra_spec_t", line)
if isc and not m is None:
skip2 = True
fname = line[len ("cipher_extra_spec_t "):]
fname = re.match ("[a-zA-Z0-9_]*", fname).group ()
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
m = re.match ("md_extra_spec_t", line)
if isc and not m is None:
skip2 = True
fname = line[len ("md_extra_spec_t "):]
fname = re.match ("[a-zA-Z0-9_]*", fname).group ()
chmsg = "(%s): Removed." % fname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s %s" % (chlognew, chmsg)
nch = True
continue
fw.write (line)
if len (ciphernames) > 0 or len (mdnames) > 0:
if isglue:
modfiles = "lib/libgcrypt-grub/cipher/%s lib/libgcrypt-grub/cipher/%s" \
% (cipher_file, cipher_file.replace ("-glue.c", ".c"))
else:
modfiles = "lib/libgcrypt-grub/cipher/%s" % cipher_file
modules.append (modname)
chmsg = "(GRUB_MOD_INIT(%s)): New function\n" % modname
if nch:
chlognew = "%s\n %s" % (chlognew, chmsg)
else:
chlognew = "%s%s" % (chlognew, chmsg)
nch = True
fw.write ("\n\nGRUB_MOD_INIT(%s)\n" % modname)
fw.write ("{\n")
for ciphername in ciphernames:
chmsg = "Register cipher %s" % ciphername
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_cipher_register (&%s);\n" % ciphername)
for mdname in mdnames:
chmsg = "Register digest %s" % mdname
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_md_register (&%s);\n" % mdname)
fw.write ("}")
chmsg = "(GRUB_MOD_FINI(%s)): New function\n" % modname
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write ("\n\nGRUB_MOD_FINI(%s)\n" % modname)
fw.write ("{\n")
for ciphername in ciphernames:
chmsg = "Unregister cipher %s" % ciphername
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_cipher_unregister (&%s);\n" % ciphername)
for mdname in mdnames:
chmsg = "Unregister MD %s" % mdname
chlognew = "%s\n %s" % (chlognew, chmsg)
fw.write (" grub_md_unregister (&%s);\n" % mdname)
fw.write ("}\n")
conf.write ("module = {\n")
conf.write (" name = %s;\n" % modname)
for src in modfiles.split():
conf.write (" common = %s;\n" % src)
confutil.write (" common = grub-core/%s;\n" % src)
if modname == "gcry_rijndael" or modname == "gcry_md4" or modname == "gcry_md5" or modname == "gcry_rmd160" or modname == "gcry_sha1" or modname == "gcry_sha256" or modname == "gcry_sha512" or modname == "gcry_tiger":
# Alignment checked by hand
conf.write (" cflags = '$(CFLAGS_GCRY) -Wno-cast-align -Wno-strict-aliasing';\n");
else:
conf.write (" cflags = '$(CFLAGS_GCRY)';\n");
conf.write (" cppflags = '$(CPPFLAGS_GCRY)';\n");
conf.write ("};\n\n")
f.close ()
fw.close ()
if nch:
chlog = "%s%s\n" % (chlog, chlognew)
elif isc and cipher_file != "camellia.c":
print ("WARNING: C file isn't a module: %s" % cipher_file)
f.close ()
fw.close ()
os.remove (outfile)
chlog = "%s\n * %s: Removed" % (chlog, cipher_file)
continue
chlog = "%s%sSkipped unknown file\n" % (chlog, chlognew)
print ("WARNING: unknown file %s" % cipher_file)
cryptolist.close ()
chlog = "%s * crypto.lst: New file.\n" % chlog
outfile = os.path.join (cipher_dir_out, "types.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <grub/types.h>\n")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * types.h: New file.\n" % chlog
fw.close ()
outfile = os.path.join (cipher_dir_out, "memory.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * memory.h: New file.\n" % chlog
fw.close ()
outfile = os.path.join (cipher_dir_out, "cipher.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <grub/crypto.h>\n")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * cipher.h: Likewise.\n" % chlog
fw.close ()
outfile = os.path.join (cipher_dir_out, "g10lib.h")
fw=codecs.open (outfile, "w", "utf-8")
fw.write ("#include <cipher_wrap.h>\n")
chlog = "%s * g10lib.h: Likewise.\n" % chlog
fw.close ()
infile = os.path.join (cipher_dir_in, "ChangeLog")
outfile = os.path.join (cipher_dir_out, "ChangeLog")
conf.close ();
initfile = codecs.open (os.path.join (cipher_dir_out, "init.c"), "w", "utf-8")
initfile.write ("#include <grub/crypto.h>\n")
for module in modules:
initfile.write ("extern void grub_%s_init (void);\n" % module)
initfile.write ("extern void grub_%s_fini (void);\n" % module)
initfile.write ("\n")
initfile.write ("void\n")
initfile.write ("grub_gcry_init_all (void)\n")
initfile.write ("{\n")
for module in modules:
initfile.write (" grub_%s_init ();\n" % module)
initfile.write ("}\n")
initfile.write ("\n")
initfile.write ("void\n")
initfile.write ("grub_gcry_fini_all (void)\n")
initfile.write ("{\n")
for module in modules:
initfile.write (" grub_%s_fini ();\n" % module)
initfile.write ("}\n")
initfile.close ()
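# Illustration (added; hypothetical output): if modules ended up as
# ['gcry_sha256'], the generated init.c would contain roughly
#   void grub_gcry_init_all (void) { grub_gcry_sha256_init (); }
#   void grub_gcry_fini_all (void) { grub_gcry_sha256_fini (); }
# so a single call registers (or unregisters) every imported module at once.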
confutil.write (" common = grub-core/lib/libgcrypt-grub/cipher/init.c;\n")
confutil.write ("};\n");
confutil.close ();
f=codecs.open (infile, "r", "utf-8")
fw=codecs.open (outfile, "w", "utf-8")
dt = datetime.date.today ()
fw.write ("%04d-%02d-%02d Automatic import tool\n" % \
(dt.year,dt.month, dt.day))
fw.write ("\n")
fw.write (" Imported ciphers to GRUB\n")
fw.write ("\n")
fw.write (chlog)
fw.write ("\n")
for line in f:
fw.write (line)
f.close ()
fw.close ()
| gpl-3.0 |
ptisserand/ansible | lib/ansible/utils/module_docs_fragments/vyos.py | 58 | 3118 | #
# (c) 2015, Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
provider:
description:
- B(Deprecated)
- "Starting with Ansible 2.5 we recommend using C(connection: network_cli)."
- For more information please see the L(Network Guide, ../network/getting_started/network_differences.html#multiple-communication-protocols).
- HORIZONTALLINE
- A dict object containing connection details.
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote
device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
notes:
- For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide <network_guide>`
"""
| gpl-3.0 |
zhinaonet/sqlmap-z | lib/controller/handler.py | 1 | 4543 | #!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.common import Backend
from lib.core.data import conf
from lib.core.data import kb
from lib.core.dicts import DBMS_DICT
from lib.core.enums import DBMS
from lib.core.settings import MSSQL_ALIASES
from lib.core.settings import MYSQL_ALIASES
from lib.core.settings import ORACLE_ALIASES
from lib.core.settings import PGSQL_ALIASES
from lib.core.settings import SQLITE_ALIASES
from lib.core.settings import ACCESS_ALIASES
from lib.core.settings import FIREBIRD_ALIASES
from lib.core.settings import MAXDB_ALIASES
from lib.core.settings import SYBASE_ALIASES
from lib.core.settings import DB2_ALIASES
from lib.core.settings import HSQLDB_ALIASES
from lib.core.settings import INFORMIX_ALIASES
from lib.utils.sqlalchemy import SQLAlchemy
from plugins.dbms.mssqlserver import MSSQLServerMap
from plugins.dbms.mssqlserver.connector import Connector as MSSQLServerConn
from plugins.dbms.mysql import MySQLMap
from plugins.dbms.mysql.connector import Connector as MySQLConn
from plugins.dbms.oracle import OracleMap
from plugins.dbms.oracle.connector import Connector as OracleConn
from plugins.dbms.postgresql import PostgreSQLMap
from plugins.dbms.postgresql.connector import Connector as PostgreSQLConn
from plugins.dbms.sqlite import SQLiteMap
from plugins.dbms.sqlite.connector import Connector as SQLiteConn
from plugins.dbms.access import AccessMap
from plugins.dbms.access.connector import Connector as AccessConn
from plugins.dbms.firebird import FirebirdMap
from plugins.dbms.firebird.connector import Connector as FirebirdConn
from plugins.dbms.maxdb import MaxDBMap
from plugins.dbms.maxdb.connector import Connector as MaxDBConn
from plugins.dbms.sybase import SybaseMap
from plugins.dbms.sybase.connector import Connector as SybaseConn
from plugins.dbms.db2 import DB2Map
from plugins.dbms.db2.connector import Connector as DB2Conn
from plugins.dbms.hsqldb import HSQLDBMap
from plugins.dbms.hsqldb.connector import Connector as HSQLDBConn
from plugins.dbms.informix import InformixMap
from plugins.dbms.informix.connector import Connector as InformixConn
def setHandler():
"""
Detect which is the target web application back-end database
management system.
"""
items = [
(DBMS.MYSQL, MYSQL_ALIASES, MySQLMap, MySQLConn),
(DBMS.ORACLE, ORACLE_ALIASES, OracleMap, OracleConn),
(DBMS.PGSQL, PGSQL_ALIASES, PostgreSQLMap, PostgreSQLConn),
(DBMS.MSSQL, MSSQL_ALIASES, MSSQLServerMap, MSSQLServerConn),
(DBMS.SQLITE, SQLITE_ALIASES, SQLiteMap, SQLiteConn),
(DBMS.ACCESS, ACCESS_ALIASES, AccessMap, AccessConn),
(DBMS.FIREBIRD, FIREBIRD_ALIASES, FirebirdMap, FirebirdConn),
(DBMS.MAXDB, MAXDB_ALIASES, MaxDBMap, MaxDBConn),
(DBMS.SYBASE, SYBASE_ALIASES, SybaseMap, SybaseConn),
(DBMS.DB2, DB2_ALIASES, DB2Map, DB2Conn),
(DBMS.HSQLDB, HSQLDB_ALIASES, HSQLDBMap, HSQLDBConn),
(DBMS.INFORMIX, INFORMIX_ALIASES, InformixMap, InformixConn),
]
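# Move the already-identified (or user-forced) DBMS, if any, to the front of the
# list so its handler is probed first; max() picks the matching tuple because
# every non-matching entry evaluates to None.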
_ = max(_ if (conf.get("dbms") or Backend.getIdentifiedDbms() or kb.heuristicExtendedDbms or "").lower() in _[1] else None for _ in items)
if _:
items.remove(_)
items.insert(0, _)
for dbms, aliases, Handler, Connector in items:
handler = Handler()
conf.dbmsConnector = Connector()
if conf.direct:
dialect = DBMS_DICT[dbms][3]
if dialect:
sqlalchemy = SQLAlchemy(dialect=dialect)
sqlalchemy.connect()
if sqlalchemy.connector:
conf.dbmsConnector = sqlalchemy
else:
try:
conf.dbmsConnector.connect()
except NameError:
pass
else:
conf.dbmsConnector.connect()
if handler.checkDbms():
if kb.resolutionDbms:
conf.dbmsHandler = max(_ for _ in items if _[0] == kb.resolutionDbms)[2]()
else:
conf.dbmsHandler = handler
conf.dbmsHandler._dbms = dbms
break
else:
conf.dbmsConnector = None
# At this point back-end DBMS is correctly fingerprinted, no need
# to enforce it anymore
Backend.flushForcedDbms()
| gpl-3.0 |
robotlinker/robotlinker_core | src/rosbridge_suite/rosbridge_server/src/tornado/test/simple_httpclient_test.py | 19 | 22731 | from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import sys
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from tornado.log import gen_log, app_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, bind_unused_port, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
if self.get_argument("error", None):
self.set_header("Content-Length", "5")
self.write("hello")
self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
@gen.coroutine
def get(self):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.request.connection.stream
yield stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_default_certificates_exist(self):
open(_default_ca_certs()).close()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_request_timeout(self):
response = self.fetch('/trigger?wake=false', request_timeout=0.1)
self.assertEqual(response.code, 599)
self.assertTrue(0.099 < response.request_time < 0.15, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
try:
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
self.http_client.fetch(url, self.stop, allow_ipv6=False)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def xtest_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
#
# A test without a content-length header is included below
# in HTTP204NoContentTestCase.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, "Malformed HTTP message"):
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
server_socket, port = bind_unused_port()
server_socket.close()
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://localhost:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(response.error)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
self.assertTrue(contains_errno, response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/trigger'), self.stop,
request_timeout=10)
# Wait for the trigger request to block, not complete.
self.wait()
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=0.1)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(response.request_time < 1, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
self.triggers.popleft()()
self.wait()
def test_no_content_length(self):
response = self.fetch("/no_content_length")
self.assertEquals(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.Task(IOLoop.current().add_callback)
yield write(b'5678')
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
**kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
# A 204 response never has a body, even if doesn't have a content-length
# (which would otherwise mean read-until-close). Tornado always
# sends a content-length, so we simulate here a server that sends
# no content length and does not close the connection.
#
# Tests of a 204 response with a Content-Length header are included
# in SimpleHTTPClientTestMixin.
request.connection.stream.write(
b"HTTP/1.1 204 No content\r\n\r\n")
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never invokes its callback.
class BadResolver(Resolver):
def resolve(self, *args, **kwargs):
pass
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
response = self.fetch('/hello', connect_timeout=0.1)
self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
| apache-2.0 |
abhiQmar/servo | tests/wpt/css-tests/tools/pytest/testing/test_capture.py | 171 | 32410 | # note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
from __future__ import with_statement
import pickle
import os
import sys
import _pytest._code
import py
import pytest
import contextlib
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
from py.builtin import print_
needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
if sys.version_info >= (3, 0):
def tobytes(obj):
if isinstance(obj, str):
obj = obj.encode('UTF-8')
assert isinstance(obj, bytes)
return obj
def totext(obj):
if isinstance(obj, bytes):
obj = str(obj, 'UTF-8')
assert isinstance(obj, str)
return obj
else:
def tobytes(obj):
if isinstance(obj, unicode):
obj = obj.encode('UTF-8')
assert isinstance(obj, str)
return obj
def totext(obj):
if isinstance(obj, str):
obj = unicode(obj, 'UTF-8')
assert isinstance(obj, unicode)
return obj
def oswritebytes(fd, obj):
os.write(fd, tobytes(obj))
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
class TestCaptureManager:
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == "fd" if hasattr(os, "dup") else "sys"
parser = Parser()
monkeypatch.delattr(os, 'dup', raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@needsosdup
@pytest.mark.parametrize("method",
['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.init_capturings()
outerr = capman.suspendcapture()
assert outerr == ("", "")
outerr = capman.suspendcapture()
assert outerr == ("", "")
print ("hello")
out, err = capman.suspendcapture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resumecapture()
print ("hello")
out, err = capman.suspendcapture()
if method != "no":
assert out == "hello\n"
capman.reset_capturings()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.init_capturings()
pytest.raises(AssertionError, "capman.init_capturings()")
capman.reset_capturings()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_unicode(testdir, method):
if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2,2):
pytest.xfail("does not work on pypy < 2.2")
if sys.version_info >= (3, 0):
obj = "'b\u00f6y'"
else:
obj = "u'\u00f6y'"
testdir.makepyfile("""
# coding=utf8
# taken from issue 227 from nosetests
def test_unicode():
import sys
print (sys.stdout)
print (%s)
""" % obj)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile("""
def test_unicode():
print ('b\\u00f6y')
""")
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
def test_collect_capturing(testdir):
p = testdir.makepyfile("""
print ("collect %s failure" % 13)
import xyz42123
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*Captured stdout*",
"*collect 13 failure*",
])
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile("""
def setup_module(mod):
print ("setup module")
def setup_function(function):
print ("setup " + function.__name__)
def test_func1():
print ("in func1")
assert 0
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
])
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile("""
import sys
def setup_module(func):
print ("module-setup")
def setup_function(func):
print ("function-setup")
def test_func():
print ("in function")
assert 0
def teardown_function(func):
print ("in teardown")
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
])
def test_no_carry_over(self, testdir):
p = testdir.makepyfile("""
def test_func1():
print ("in func1")
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile("""
def setup_function(function):
print ("setup func1")
def teardown_function(function):
print ("teardown func1")
assert 0
def test_func1():
print ("in func1")
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
'*teardown_function*',
'*Captured stdout*',
"setup func1*",
"in func1*",
"teardown func1*",
#"*1 fixture failure*"
])
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile("""
def teardown_module(mod):
print ("teardown module")
assert 0
def test_func():
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
])
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile("""
import sys
def test_capturing():
print (42)
sys.stderr.write(str(23))
def test_capturing_error():
print (1)
sys.stderr.write(str(2))
raise ValueError
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*test_capturing_outerr.py .F",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
])
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile("""
def test_logging():
import logging
                import pytest
                from _pytest import capture
                stream = capture.TextIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
""")
result = testdir.runpytest_subprocess(p)
        assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_function(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_function(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print (optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors show first!
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_module(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_module(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print (optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors come first
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_initialized_in_test(self, testdir):
p = testdir.makepyfile("""
import sys
def test_something():
# pytest does not import logging
assert 'logging' not in sys.modules
import logging
logging.basicConfig()
logging.warn("hello432")
assert 0
""")
result = testdir.runpytest_subprocess(
p, "--traceconfig",
"-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"*hello432*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
logging.warn("hello435")
""")
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines([
"WARNING*hello435*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
""")
# make sure that logging is still captured in tests
p = testdir.makepyfile("""
def test_hello():
import logging
logging.warn("hello433")
assert 0
""")
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"WARNING*hello433*",
])
assert 'something' not in result.stderr.str()
assert 'operation on closed file' not in result.stderr.str()
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource("""
def test_hello(capsys):
print (42)
out, err = capsys.readouterr()
assert out.startswith("42")
""", *opt)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile("""
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR*setup*test_one*",
"*capsys*capfd*same*time*",
"*ERROR*setup*test_two*",
"*capsys*capfd*same*time*",
"*2 error*"])
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile("""
def test_hello(cap%s):
print ("xxx42xxx")
assert 0
""" % method)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"xxx42xxx",
])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfd):
import os
os.write(1, "42".encode('ascii'))
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
""")
reprec.assertoutcome(passed=1)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile("""
def test_hello(capsys, missingarg):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_partial_setup_failure*",
"*1 error*",
])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile("""
def test_hello(capfd):
import os
os.write(1, str(42).encode('ascii'))
raise KeyboardInterrupt()
""")
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines([
"*KeyboardInterrupt*"
])
assert result.ret == 2
@pytest.mark.issue14
def test_capture_and_logging(self, testdir):
p = testdir.makepyfile("""
import logging
def test_log(capsys):
logging.error('x')
""")
result = testdir.runpytest_subprocess(p)
assert 'closed' not in result.stderr.str()
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(_pytest._code.Source("""
def pytest_runtest_setup(item):
raise ValueError(42)
"""))
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
result.stdout.fnmatch_lines([
"*ValueError(42)*",
"*1 error*"
])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile("""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
""")
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert 'hello19' not in result.stdout.str()
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile("""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
""")
result = testdir.runpytest('--cap=fd')
result.stdout.fnmatch_lines('''
*def test_func*
*assert 0*
*Captured*
*1 failed*
''')
def test_capture_early_option_parsing(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert 'hello19' in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
""")
result = testdir.runpytest('--assert=plain')
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occurr during readouterr"""
testdir.makepyfile(pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
""")
result = testdir.runpytest_subprocess(
"-p", "pytest_xyz", "--version", syspathinsert=True
)
result.stderr.fnmatch_lines([
"*in bad_snap",
" raise Exception('boom')",
"Exception: boom",
])
class TestTextIO:
def test_text(self):
f = capture.TextIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.TextIO()
if sys.version_info >= (3, 0):
f.write("\u00f6")
pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
else:
f.write(unicode("\u00f6", 'UTF-8'))
f.write("hello") # bytes
s = f.getvalue()
f.close()
assert isinstance(s, unicode)
def test_bytes_io():
f = py.io.BytesIO()
f.write(tobytes("hello"))
pytest.raises(TypeError, "f.write(totext('hello'))")
s = f.getvalue()
assert s == tobytes("hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
pytest.raises(IOError, iter, f)
pytest.raises(ValueError, f.fileno)
f.close() # just for completeness
@pytest.yield_fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open('wb+')
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print_(i, end="", file=nf)
flist.append(nf)
for i in range(5):
f = flist[i]
f.close()
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
def test_dupfile_on_bytesio():
io = py.io.BytesIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == b"hello"
def test_dupfile_on_textio():
io = py.io.TextIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == "hello"
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = py.process.cmdexec("lsof -p %d" % pid)
except (py.process.cmdexec.Error, UnicodeDecodeError):
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof'")
yield
out2 = py.process.cmdexec("lsof -p %d" % pid)
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = tobytes("hello")
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open('wb+') as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print_("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self, tmpfile):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == tobytes('')
def test_writeorg(self, tmpfile):
data1, data2 = tobytes("foo"), tobytes("bar")
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == totext(data1)
with open(tmpfile.name, 'rb') as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self, tmpfile):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = tobytes("hello")
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, tobytes("world"))
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, tobytes("but now"))
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print ("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print ("hx\xc4\x85\xc4\x87")
out, err = cap.readouterr()
assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
@pytest.mark.skipif('sys.version_info >= (3,)',
reason='text output different for bytes on python3')
def test_capturing_readouterr_decode_error_handling(self):
with self.getcapture() as cap:
            # triggered an internal error in pytest
print('\xa6')
out, err = cap.readouterr()
assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
def test_reset_twice_error(self):
with self.getcapture() as cap:
print ("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.TextIO()
sys.stderr = capture.TextIO()
print ("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print ("cap1")
with self.getcapture() as cap2:
print ("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print ("XXX this test may well hang instead of crashing")
print ("XXX which indicates an error in the underlying capturing")
print ("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, "sys.stdin.read()")
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile("""
import os
def test_x():
os.write(1, "hello\\n".encode("ascii"))
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_x*
*assert 0*
*Captured stdout*
""")
def test_intermingling(self):
with self.getcapture() as cap:
oswritebytes(1, "1")
sys.stdout.write(str(2))
sys.stdout.flush()
oswritebytes(1, "3")
oswritebytes(2, "a")
sys.stderr.write("b")
sys.stderr.flush()
oswritebytes(2, "c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile("""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_,
Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
cap.stop_capturing()
""")
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
@needsosdup
@pytest.mark.parametrize('use', [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile("""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_capture_again*
*assert 0*
*stdout*
*hello*
""")
@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, 'dup'):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile("""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warn("hello1")
outerr = cap.readouterr()
print ("suspend, captured %%s" %%(outerr,))
logging.warn("hello2")
cap.pop_outerr_to_orig()
logging.warn("hello3")
outerr = cap.readouterr()
print ("suspend2, captured %%s" %% (outerr,))
""" % (method,))
result = testdir.runpython(p)
result.stdout.fnmatch_lines("""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
""")
result.stderr.fnmatch_lines("""
WARNING:root:hello2
""")
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_pickling_and_unpickling_encoded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
# pickle.loads() raises infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
| mpl-2.0 |
NumberZeroSoftware/PDFINVEST | pdfapp/migrations/0010_auto_20170225_0034.py | 1 | 2473 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-25 00:34
from __future__ import unicode_literals
from django.db import migrations, models
import pdfapp.validators
class Migration(migrations.Migration):
dependencies = [
('pdfapp', '0009_auto_20170223_2247'),
]
operations = [
migrations.AlterField(
model_name='program',
name='credits',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_credits]),
),
migrations.AlterField(
model_name='program',
name='evaluation_strategies',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='laboratory_hours',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_positive_integer]),
),
migrations.AlterField(
model_name='program',
name='methodological_strategies',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='objectives',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='practice_hours',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_positive_integer]),
),
migrations.AlterField(
model_name='program',
name='recommended_sources',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='requirements',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='synoptic_content',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='program',
name='theory_hours',
field=models.IntegerField(blank=True, null=True, validators=[pdfapp.validators.validate_positive_integer]),
),
migrations.AlterField(
model_name='program',
name='validity_year',
field=models.IntegerField(validators=[pdfapp.validators.validate_program_years]),
),
]
| mit |
alfredhq/djlint | djlint/analyzers/base.py | 3 | 8412 | import ast
import os
from .context import Context
class BaseAnalyzer(object):
"""
Base code analyzer class. Takes dict `file path => ast node` as first
param and path to repository as second.
    Subclass this class and implement the `analyze_file` method if you want
    to create a new code analyzer.
"""
surround_by = 2
def __init__(self, code_dict, repo_path):
self._file_lines = None
self.code_dict = code_dict
self.repo_path = repo_path
def get_file_lines(self, filepath, start, stop):
"""
        Return a code snippet from file `filepath` for lines `start` through
        `stop` as tuples `(<line number>, <importance>, <text>)`, extending it
        by `surround_by` lines up and down if possible.
        If the important part has blank lines at the bottom they are removed.
"""
if self._file_lines is None:
with open(os.path.join(self.repo_path, filepath)) as f:
self._file_lines = f.readlines()
if stop is None:
lines = self._file_lines[start - 1:]
else:
lines = self._file_lines[start - 1:stop]
for i, line in enumerate(lines):
lines[i] = [start + i, True, line.rstrip()]
while lines and self.is_empty_line(lines[-1][-1]):
lines.pop()
if not lines:
return []
stop = lines[0][0]
start = max(1, stop - self.surround_by)
prefix_lines = []
for i, line in enumerate(self._file_lines[start - 1:stop - 1], start=start):
prefix_lines.append([i, False, line.rstrip()])
start = lines[-1][0] + 1
stop = start + self.surround_by
suffix_lines = []
for i, line in enumerate(self._file_lines[start - 1:stop - 1], start=start):
suffix_lines.append([i, False, line.rstrip()])
return prefix_lines + lines + suffix_lines
def is_empty_line(self, line):
return not line.split('#')[0].strip()
def clear_file_lines_cache(self):
self._file_lines = None
def analyze_file(self, filepath, code):
raise NotImplementedError
def analyze(self):
"""
Iterate over `code_dict` and yield all results from every file.
"""
for filepath, code in self.code_dict.items():
for result in self.analyze_file(filepath, code):
yield result
self.clear_file_lines_cache()
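# A minimal illustrative sketch (not part of djlint): one possible
# ``BaseAnalyzer`` subclass, as the docstring above suggests. The analyzer
# name, the choice to flag ``lambda`` expressions and the description text
# are assumptions made up for this example.
class ExampleLambdaAnalyzer(BaseAnalyzer):
    def analyze_file(self, filepath, code):
        # ``code`` is the parsed ast module node for ``filepath``.
        for node in ast.walk(code):
            if isinstance(node, ast.Lambda):
                result = Result(description='lambda expression found',
                                path=filepath, line=node.lineno)
                result.source.extend(
                    self.get_file_lines(filepath, node.lineno, node.lineno))
                yield result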
class CodeSnippet(list):
"""
Represents code snippet as list of tuples `(<line number>, <importance>,
<text>)`.
Use `add_line` method to add new lines to the snippet.
"""
def add_line(self, lineno, text, important=True):
"""
Add new line to the end of snippet.
"""
self.append((lineno, important, text))
class Result(object):
"""
Represents the result of code analysis.
"""
def __init__(self, description, path, line):
self.description = description
self.path = path
self.line = line
self.source = CodeSnippet()
self.solution = CodeSnippet()
class AttributeVisitor(ast.NodeVisitor):
"""
Process attribute node and build the name of the attribute if possible.
Currently only simple expressions are supported (like `foo.bar.baz`).
    If it is not possible to get the attribute name as a string, `is_usable`
    is set to `False`.
After `visit()` method call `get_name()` method can be used to get
attribute name if `is_usable` == `True`.
"""
def __init__(self):
self.is_usable = True
self.name = []
def get_name(self):
"""
Get the name of the visited attribute.
"""
return '.'.join(self.name)
def visit_Attribute(self, node):
self.generic_visit(node)
self.name.append(node.attr)
def visit_Name(self, node):
self.name.append(node.id)
def visit_Load(self, node):
pass
def generic_visit(self, node):
# If attribute node consists not only from nodes of types `Attribute`
# and `Name` mark it as unusable.
if not isinstance(node, ast.Attribute):
self.is_usable = False
ast.NodeVisitor.generic_visit(self, node)
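# A small illustrative sketch (not part of djlint): typical use of
# ``AttributeVisitor`` as described in its docstring above; the sample
# expression ``foo.bar.baz`` is an assumption made for the example.
def example_attribute_name():
    node = ast.parse('foo.bar.baz', mode='eval').body
    visitor = AttributeVisitor()
    visitor.visit(node)
    if visitor.is_usable:
        return visitor.get_name()  # -> 'foo.bar.baz'
    return None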
def set_lineno(meth):
def decorator(self, node):
self.push_lineno(node.lineno)
result = meth(self, node)
self.pop_lineno()
return result
decorator.__name__ = meth.__name__
return decorator
class ModuleVisitor(ast.NodeVisitor):
"""
    Collect interesting imported names while visiting module nodes.
"""
interesting = {}
def __init__(self):
self.names = Context()
self.lineno = []
self.found = {}
def add_found(self, name, node):
lineno_level = self.get_lineno_level()
if lineno_level not in self.found:
self.found[lineno_level] = []
self.found[lineno_level].append([name, node, self.get_lineno(), None])
def get_found(self):
for level in self.found.values():
for found in level:
yield found
def push_lineno(self, lineno):
self.lineno.append(lineno)
lineno_level = self.get_lineno_level()
for level in self.found.keys():
if level < lineno_level:
return
for found in self.found[level]:
if found[-1] is None and lineno >= found[-2]:
found[-1] = max(lineno - 1, found[-2])
def pop_lineno(self):
return self.lineno.pop()
def get_lineno(self):
return self.lineno[-1]
def get_lineno_level(self):
return len(self.lineno)
def update_names(self, aliases, get_path):
"""
        Update the `names` context with interesting imported `aliases`, using
        the `get_path` function to get the full path to an object from its name.
"""
for alias in aliases:
path = get_path(alias.name)
if path not in self.interesting:
continue
if self.interesting[path]:
for attr in self.interesting[path]:
name = '.'.join((alias.asname or alias.name, attr))
self.names[name] = '.'.join((path, attr))
else:
name = alias.asname or alias.name
self.names[name] = path
@set_lineno
def visit_Import(self, node):
self.update_names(node.names, lambda x: x)
@set_lineno
def visit_ImportFrom(self, node):
self.update_names(node.names, lambda x: '.'.join((node.module, x)))
@set_lineno
def visit_FunctionDef(self, node):
        # Create a new scope in the `names` context when entering a function body
self.names.push()
self.generic_visit(node)
self.names.pop()
@set_lineno
def visit_Assign(self, node):
        # Some assignments attach interesting imports to new names.
# Trying to parse it.
visitor = AttributeVisitor()
visitor.visit(node.value)
if not visitor.is_usable:
            # It seems the right side is not an attribute. Let's visit the
            # assignment anyway, as it can also contain interesting code.
self.generic_visit(node)
return
name = visitor.get_name()
# skipping assignment if value is not interesting
if name not in self.names:
return
# trying to parse the left-side attribute name
for target in node.targets:
visitor = AttributeVisitor()
visitor.visit(target)
if not visitor.is_usable:
continue
target = visitor.get_name()
self.names[target] = self.names[name]
@set_lineno
def visit_Call(self, node):
self.generic_visit(node)
@set_lineno
def visit_List(self, node):
self.generic_visit(node)
@set_lineno
def visit_Tuple(self, node):
self.generic_visit(node)
class DeprecatedCodeVisitor(ModuleVisitor):
def visit_Attribute(self, node):
visitor = AttributeVisitor()
visitor.visit(node)
if visitor.is_usable:
name = visitor.get_name()
if name in self.names:
self.add_found(self.names[name], node)
def visit_Name(self, node):
if node.id in self.names:
self.add_found(self.names[node.id], node)
| isc |
eriwoon/ShellScriptCollect | Python/replaceAVP.py | 1 | 3204 | #! /usr/bin/python
#coding: utf-8
import sys
import os
import re
def findAllFile(dir):
folder = [dir]
file = []
while len(folder) > 0:
curDir = folder[0]
folder.pop(0)
lst = os.listdir(curDir)
for i in lst:
name = curDir + '\\' + i
if os.path.isfile(name) == True:
file.append(name)
else:
folder.append(name)
return file
def filterExtension(files, extension):
outputFiles = []
for file in files:
if os.path.splitext(file)[1] == extension:
outputFiles.append(file)
return outputFiles
def avpReplaceDefination():
return {
re.compile('^\s*avp\s+264\s*$') : '''ASSIGN_RULE = crl_begin
shadow0;
} crl_end;
''',
re.compile('^\s*avp\s+296\s*$') : '''ASSIGN_RULE = crl_begin
shadow1;
} crl_end;
'''
}
def replaceFilesWithAvp(files, avpReplace):
log = open("replaceAVP.py.log", "a+")
for file in files:
log.write("open file:" + file + "\n")
f = open(file, 'r')
new_content = ""
reAssignmentModeA = re.compile("^\s*ASSIGNMENT_MODE\s*=\s*A\s*;.*")
reAssignmentModeR = re.compile("^\s*ASSIGNMENT_MODE\s*=\s*R\s*;.*")
reEvtAttrId = re.compile("^\s*EVT_ATTR_ID\s*=.*;.*")
reLiftBrace = re.compile("^\s*{.*")
reRightBrace = re.compile("^\s*}.*")
lineNo = 1
line = f.readline()
while line != "":
patternMatched = False
for pattern in avpReplace:
if pattern.match(line) :
#print("line matched pattern : " + line)
log.write(str(lineNo) + ":line matched pattern : " + line)
patternMatched = True
new_content += line
lineNo += 1
line = f.readline()
while line != "":
#This is the place to find all content enclosed by { }
if reRightBrace.match(line):
#print("reRightBrace.matched" + line)
log.write(str(lineNo) + ":reRightBrace.matched : " + line + '\n')
new_content += line
break
elif reAssignmentModeA.match(line):
#print("reAssignmentModeA.matched" + line)
log.write(str(lineNo) + ":reAssignmentModeA.matched : " + line)
split = line.split("=")
newline = split[0] + "=" + split[1].replace("A","R")
new_content += newline
elif reAssignmentModeR.match(line):
#print("reAssignmentModeR.matched" + line)
log.write(str(lineNo) + ":reAssignmentModeR.matched : " + line)
pass
elif reEvtAttrId.match(line):
#print("reEvtAttrId.matched" + line)
log.write(str(lineNo) + ":reEvtAttrId.matched : " + line)
split = line.split("EVT_ATTR_ID")
newline = split[0] + avpReplace[pattern]
new_content += newline
else:
new_content += line
lineNo += 1
line = f.readline()
if patternMatched == False:
new_content += line
lineNo += 1
line = f.readline()
f.close()
fout = open(file, "w")
fout.write(new_content)
fout.close()
log.write("close file:" + file + " Finished\n")
log.close()
if __name__ == '__main__':
cwd = os.getcwd()
files = findAllFile(cwd)
files = filterExtension(files, '.diamEncoding')
#print(files)
avpReplace = avpReplaceDefination()
replaceFilesWithAvp(files, avpReplace)
print("Replace finished, please refer to replaceAVP.py.log")
| mit |
lochiiconnectivity/boto | boto/connection.py | 1 | 44959 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010 Google
# Copyright (c) 2008 rPath, Inc.
# Copyright (c) 2009 The Echo Nest Corporation
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Parts of this code were copied or derived from sample code supplied by AWS.
# The following notice applies to that code.
#
# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006 Amazon Digital Services, Inc. or its
# affiliates.
"""
Handles basic connections to AWS
"""
from __future__ import with_statement
import base64
import errno
import httplib
import os
import Queue
import random
import re
import socket
import sys
import time
import urllib
import urlparse
import xml.sax
import copy
import auth
import auth_handler
import boto
import boto.utils
import boto.handler
import boto.cacerts
from boto import config, UserAgent
from boto.exception import AWSConnectionError
from boto.exception import BotoClientError
from boto.exception import BotoServerError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.resultset import ResultSet
HAVE_HTTPS_CONNECTION = False
try:
import ssl
from boto import https_connection
# Google App Engine runs on Python 2.5 so doesn't have ssl.SSLError.
if hasattr(ssl, 'SSLError'):
HAVE_HTTPS_CONNECTION = True
except ImportError:
pass
try:
import threading
except ImportError:
import dummy_threading as threading
ON_APP_ENGINE = all(key in os.environ for key in (
'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
PORTS_BY_SECURITY = {True: 443,
False: 80}
DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__ )), "cacerts.txt")
class HostConnectionPool(object):
"""
A pool of connections for one remote (host,is_secure).
When connections are added to the pool, they are put into a
pending queue. The _mexe method returns connections to the pool
    before the response body has been read, so the connections aren't
ready to send another request yet. They stay in the pending queue
until they are ready for another request, at which point they are
returned to the pool of ready connections.
The pool of ready connections is an ordered list of
(connection,time) pairs, where the time is the time the connection
was returned from _mexe. After a certain period of time,
connections are considered stale, and discarded rather than being
reused. This saves having to wait for the connection to time out
if AWS has decided to close it on the other end because of
inactivity.
Thread Safety:
        This class is used only from ConnectionPool while its mutex
is held.
"""
def __init__(self):
self.queue = []
def size(self):
"""
Returns the number of connections in the pool for this host.
Some of the connections may still be in use, and may not be
ready to be returned by get().
"""
return len(self.queue)
def put(self, conn):
"""
Adds a connection to the pool, along with the time it was
added.
"""
self.queue.append((conn, time.time()))
def get(self):
"""
Returns the next connection in this pool that is ready to be
        reused.  Returns None if there aren't any.
"""
# Discard ready connections that are too old.
self.clean()
# Return the first connection that is ready, and remove it
# from the queue. Connections that aren't ready are returned
# to the end of the queue with an updated time, on the
# assumption that somebody is actively reading the response.
for _ in range(len(self.queue)):
(conn, _) = self.queue.pop(0)
if self._conn_ready(conn):
return conn
else:
self.put(conn)
return None
def _conn_ready(self, conn):
"""
There is a nice state diagram at the top of httplib.py. It
indicates that once the response headers have been read (which
_mexe does before adding the connection to the pool), a
response is attached to the connection, and it stays there
until it's done reading. This isn't entirely true: even after
the client is done reading, the response may be closed, but
not removed from the connection yet.
This is ugly, reading a private instance variable, but the
state we care about isn't available in any public methods.
"""
if ON_APP_ENGINE:
# Google AppEngine implementation of HTTPConnection doesn't contain
# _HTTPConnection__response attribute. Moreover, it's not possible
# to determine if given connection is ready. Reusing connections
# simply doesn't make sense with App Engine urlfetch service.
return False
else:
response = getattr(conn, '_HTTPConnection__response', None)
return (response is None) or response.isclosed()
def clean(self):
"""
Get rid of stale connections.
"""
# Note that we do not close the connection here -- somebody
# may still be reading from it.
while len(self.queue) > 0 and self._pair_stale(self.queue[0]):
self.queue.pop(0)
def _pair_stale(self, pair):
"""
        Returns true if the (connection,time) pair is too old to be
used.
"""
(_conn, return_time) = pair
now = time.time()
return return_time + ConnectionPool.STALE_DURATION < now
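# A brief illustrative sketch (not part of boto): the put/get round trip
# described in the HostConnectionPool docstring. The ``conn`` argument is an
# assumed httplib connection whose previous response has been fully read.
def _example_host_pool_roundtrip(conn):
    pool = HostConnectionPool()
    pool.put(conn)       # parked together with the time it was returned
    reused = pool.get()  # handed back only if _conn_ready() reports it free
    return reused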
class ConnectionPool(object):
"""
A connection pool that expires connections after a fixed period of
time. This saves time spent waiting for a connection that AWS has
timed out on the other end.
This class is thread-safe.
"""
#
    # The amount of time between calls to clean.
#
CLEAN_INTERVAL = 5.0
#
# How long before a connection becomes "stale" and won't be reused
    # again. The intention is that this time is less than the timeout
# period that AWS uses, so we'll never try to reuse a connection
# and find that AWS is timing it out.
#
# Experimentation in July 2011 shows that AWS starts timing things
# out after three minutes. The 60 seconds here is conservative so
    # we should never hit that 3-minute timeout.
#
STALE_DURATION = 60.0
def __init__(self):
# Mapping from (host,is_secure) to HostConnectionPool.
# If a pool becomes empty, it is removed.
self.host_to_pool = {}
# The last time the pool was cleaned.
self.last_clean_time = 0.0
self.mutex = threading.Lock()
ConnectionPool.STALE_DURATION = \
config.getfloat('Boto', 'connection_stale_duration',
ConnectionPool.STALE_DURATION)
def __getstate__(self):
pickled_dict = copy.copy(self.__dict__)
pickled_dict['host_to_pool'] = {}
del pickled_dict['mutex']
return pickled_dict
def __setstate__(self, dct):
self.__init__()
def size(self):
"""
Returns the number of connections in the pool.
"""
return sum(pool.size() for pool in self.host_to_pool.values())
def get_http_connection(self, host, is_secure):
"""
Gets a connection from the pool for the named host. Returns
None if there is no connection that can be reused. It's the caller's
responsibility to call close() on the connection when it's no longer
needed.
"""
self.clean()
with self.mutex:
key = (host, is_secure)
if key not in self.host_to_pool:
return None
return self.host_to_pool[key].get()
def put_http_connection(self, host, is_secure, conn):
"""
Adds a connection to the pool of connections that can be
reused for the named host.
"""
with self.mutex:
key = (host, is_secure)
if key not in self.host_to_pool:
self.host_to_pool[key] = HostConnectionPool()
self.host_to_pool[key].put(conn)
def clean(self):
"""
Clean up the stale connections in all of the pools, and then
get rid of empty pools. Pools clean themselves every time a
connection is fetched; this cleaning takes care of pools that
aren't being used any more, so nothing is being gotten from
them.
"""
with self.mutex:
now = time.time()
if self.last_clean_time + self.CLEAN_INTERVAL < now:
to_remove = []
for (host, pool) in self.host_to_pool.items():
pool.clean()
if pool.size() == 0:
to_remove.append(host)
for host in to_remove:
del self.host_to_pool[host]
self.last_clean_time = now
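# A short illustrative sketch (not part of boto): how a shared ConnectionPool
# is meant to wrap connection reuse around a request. The host name and the
# plain httplib fallback are assumptions made for the example.
def _example_pool_usage(pool, host='s3.amazonaws.com', is_secure=True):
    conn = pool.get_http_connection(host, is_secure)
    if conn is None:
        # No reusable connection available; open a fresh one.
        if is_secure:
            conn = httplib.HTTPSConnection(host)
        else:
            conn = httplib.HTTPConnection(host)
    # ... issue the request and read the response headers here ...
    pool.put_http_connection(host, is_secure, conn)
    return conn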
class HTTPRequest(object):
def __init__(self, method, protocol, host, port, path, auth_path,
params, headers, body):
"""Represents an HTTP request.
:type method: string
:param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.
:type protocol: string
:param protocol: The http protocol used, 'http' or 'https'.
:type host: string
:param host: Host to which the request is addressed. eg. abc.com
:type port: int
:param port: port on which the request is being sent. Zero means unset,
in which case default port will be chosen.
:type path: string
:param path: URL path that is being accessed.
:type auth_path: string
:param path: The part of the URL path used when creating the
authentication string.
:type params: dict
:param params: HTTP url query parameters, with key as name of
the param, and value as value of param.
:type headers: dict
:param headers: HTTP headers, with key as name of the header and value
as value of header.
:type body: string
:param body: Body of the HTTP request. If not present, will be None or
empty string ('').
"""
self.method = method
self.protocol = protocol
self.host = host
self.port = port
self.path = path
if auth_path is None:
auth_path = path
self.auth_path = auth_path
self.params = params
# chunked Transfer-Encoding should act only on PUT request.
if headers and 'Transfer-Encoding' in headers and \
headers['Transfer-Encoding'] == 'chunked' and \
self.method != 'PUT':
self.headers = headers.copy()
del self.headers['Transfer-Encoding']
else:
self.headers = headers
self.body = body
def __str__(self):
return (('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
'params(%s) headers(%s) body(%s)') % (self.method,
self.protocol, self.host, self.port, self.path, self.params,
self.headers, self.body))
def authorize(self, connection, **kwargs):
for key in self.headers:
val = self.headers[key]
if isinstance(val, unicode):
self.headers[key] = urllib.quote_plus(val.encode('utf-8'))
connection._auth_handler.add_auth(self, **kwargs)
self.headers['User-Agent'] = UserAgent
# I'm not sure if this is still needed, now that add_auth is
# setting the content-length for POST requests.
if 'Content-Length' not in self.headers:
if 'Transfer-Encoding' not in self.headers or \
self.headers['Transfer-Encoding'] != 'chunked':
self.headers['Content-Length'] = str(len(self.body))
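# A minimal illustrative sketch (not part of boto): constructing an
# HTTPRequest from the fields documented above. Every value here is an
# assumed placeholder rather than a real endpoint.
def _example_http_request():
    return HTTPRequest(method='GET', protocol='https',
                       host='example.amazonaws.com', port=443,
                       path='/mybucket/mykey', auth_path=None,
                       params={}, headers={}, body='')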
class HTTPResponse(httplib.HTTPResponse):
def __init__(self, *args, **kwargs):
httplib.HTTPResponse.__init__(self, *args, **kwargs)
self._cached_response = ''
def read(self, amt=None):
"""Read the response.
This method does not have the same behavior as
httplib.HTTPResponse.read. Instead, if this method is called with
no ``amt`` arg, then the response body will be cached. Subsequent
calls to ``read()`` with no args **will return the cached response**.
"""
if amt is None:
# The reason for doing this is that many places in boto call
# response.read() and except to get the response body that they
# can then process. To make sure this always works as they expect
# we're caching the response so that multiple calls to read()
# will return the full body. Note that this behavior only
# happens if the amt arg is not specified.
if not self._cached_response:
self._cached_response = httplib.HTTPResponse.read(self)
return self._cached_response
else:
return httplib.HTTPResponse.read(self, amt)
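# A tiny illustrative sketch (not part of boto): the caching behaviour of
# HTTPResponse.read() described in its docstring -- a no-arg read() caches
# the body, so calling it again returns the same data instead of ''.
def _example_cached_read(response):
    first = response.read()   # reads and caches the full body
    second = response.read()  # served from the cache, not the socket
    assert first == second
    return first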
class AWSAuthConnection(object):
def __init__(self, host, aws_access_key_id=None,
aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, path='/',
provider='aws', security_token=None,
suppress_consec_slashes=True,
validate_certs=True):
"""
:type host: str
:param host: The host to make the connection to
:keyword str aws_access_key_id: Your AWS Access Key ID (provided by
Amazon). If none is specified, the value in your
``AWS_ACCESS_KEY_ID`` environmental variable is used.
:keyword str aws_secret_access_key: Your AWS Secret Access Key
(provided by Amazon). If none is specified, the value in your
``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
:type is_secure: boolean
:param is_secure: Whether the connection is over SSL
:type https_connection_factory: list or tuple
:param https_connection_factory: A pair of an HTTP connection
factory and the exceptions to catch. The factory should have
a similar interface to L{httplib.HTTPSConnection}.
:param str proxy: Address/hostname for a proxy server
:type proxy_port: int
:param proxy_port: The port to use when connecting over a proxy
:type proxy_user: str
:param proxy_user: The username to connect with on the proxy
:type proxy_pass: str
        :param proxy_pass: The password to use when connecting over a proxy.
:type port: int
:param port: The port to use to connect
:type suppress_consec_slashes: bool
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
:type validate_certs: bool
:param validate_certs: Controls whether SSL certificates
will be validated or not. Defaults to True.
"""
self.suppress_consec_slashes = suppress_consec_slashes
self.num_retries = 6
# Override passed-in is_secure setting if value was defined in config.
if config.has_option('Boto', 'is_secure'):
is_secure = config.getboolean('Boto', 'is_secure')
self.is_secure = is_secure
# Whether or not to validate server certificates.
# The default is now to validate certificates. This can be
        # overridden in the boto config file or by passing an
# explicit validate_certs parameter to the class constructor.
self.https_validate_certificates = config.getbool(
'Boto', 'https_validate_certificates',
validate_certs)
if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION:
raise BotoClientError(
"SSL server certificate validation is enabled in boto "
"configuration, but Python dependencies required to "
"support this feature are not available. Certificate "
"validation is only supported when running under Python "
"2.6 or later.")
self.ca_certificates_file = config.get_value(
'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
# define exceptions from httplib that we want to catch and retry
self.http_exceptions = (httplib.HTTPException, socket.error,
socket.gaierror, httplib.BadStatusLine)
# define subclasses of the above that are not retryable.
self.http_unretryable_exceptions = []
if HAVE_HTTPS_CONNECTION:
self.http_unretryable_exceptions.append(
https_connection.InvalidCertificateException)
# define values in socket exceptions we don't want to catch
self.socket_exception_values = (errno.EINTR,)
if https_connection_factory is not None:
self.https_connection_factory = https_connection_factory[0]
self.http_exceptions += https_connection_factory[1]
else:
self.https_connection_factory = None
if (is_secure):
self.protocol = 'https'
else:
self.protocol = 'http'
self.host = host
self.path = path
        # if the value passed in for debug is not an int, fall back to 0
if not isinstance(debug, (int, long)):
debug = 0
self.debug = config.getint('Boto', 'debug', debug)
if port:
self.port = port
else:
self.port = PORTS_BY_SECURITY[is_secure]
# Timeout used to tell httplib how long to wait for socket timeouts.
# Default is to leave timeout unchanged, which will in turn result in
# the socket's default global timeout being used. To specify a
# timeout, set http_socket_timeout in Boto config. Regardless,
# timeouts will only be applied if Python is 2.6 or greater.
self.http_connection_kwargs = {}
if (sys.version_info[0], sys.version_info[1]) >= (2, 6):
# If timeout isn't defined in boto config file, use 70 second
# default as recommended by
# http://docs.aws.amazon.com/amazonswf/latest/apireference/API_PollForActivityTask.html
self.http_connection_kwargs['timeout'] = config.getint(
'Boto', 'http_socket_timeout', 70)
if isinstance(provider, Provider):
# Allow overriding Provider
self.provider = provider
else:
self._provider_type = provider
self.provider = Provider(self._provider_type,
aws_access_key_id,
aws_secret_access_key,
security_token)
# Allow config file to override default host and port.
if self.provider.host:
self.host = self.provider.host
if self.provider.port:
self.port = self.provider.port
self._pool = ConnectionPool()
self._connection = (self.server_name(), self.is_secure)
self._last_rs = None
self._auth_handler = auth.get_auth_handler(
host, config, self.provider, self._required_auth_capability())
if getattr(self, 'AuthServiceName', None) is not None:
self.auth_service_name = self.AuthServiceName
def __repr__(self):
return '%s:%s' % (self.__class__.__name__, self.host)
def _required_auth_capability(self):
return []
def _get_auth_service_name(self):
return getattr(self._auth_handler, 'service_name')
# For Sigv4, the auth_service_name/auth_region_name properties allow
# the service_name/region_name to be explicitly set instead of being
# derived from the endpoint url.
def _set_auth_service_name(self, value):
self._auth_handler.service_name = value
auth_service_name = property(_get_auth_service_name, _set_auth_service_name)
def _get_auth_region_name(self):
return getattr(self._auth_handler, 'region_name')
def _set_auth_region_name(self, value):
self._auth_handler.region_name = value
auth_region_name = property(_get_auth_region_name, _set_auth_region_name)
def connection(self):
return self.get_http_connection(*self._connection)
connection = property(connection)
def aws_access_key_id(self):
return self.provider.access_key
aws_access_key_id = property(aws_access_key_id)
gs_access_key_id = aws_access_key_id
access_key = aws_access_key_id
def aws_secret_access_key(self):
return self.provider.secret_key
aws_secret_access_key = property(aws_secret_access_key)
gs_secret_access_key = aws_secret_access_key
secret_key = aws_secret_access_key
def get_path(self, path='/'):
# The default behavior is to suppress consecutive slashes for reasons
# discussed at
# https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
# You can override that behavior with the suppress_consec_slashes param.
if not self.suppress_consec_slashes:
return self.path + re.sub('^(/*)/', "\\1", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
path = path[:pos]
else:
params = None
if path[-1] == '/':
need_trailing = True
else:
need_trailing = False
path_elements = self.path.split('/')
path_elements.extend(path.split('/'))
path_elements = [p for p in path_elements if p]
path = '/' + '/'.join(path_elements)
if path[-1] != '/' and need_trailing:
path += '/'
if params:
path = path + params
return path
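    # A rough sketch of get_path() with hypothetical values, assuming
    # suppress_consec_slashes is True and self.path == '/base':
    #
    #   conn.get_path('/bucket//key?versions')
    #   # -> '/base/bucket/key?versions'
    #
    # i.e. consecutive slashes are collapsed and any query string is
    # re-appended untouched.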
def server_name(self, port=None):
if not port:
port = self.port
if port == 80:
signature_host = self.host
else:
# This unfortunate little hack can be attributed to
# a difference in the 2.6 version of httplib. In old
# versions, it would append ":443" to the hostname sent
# in the Host header and so we needed to make sure we
# did the same when calculating the V2 signature. In 2.6
# (and higher!)
# it no longer does that. Hence, this kludge.
if ((ON_APP_ENGINE and sys.version[:3] == '2.5') or
sys.version[:3] in ('2.6', '2.7')) and port == 443:
signature_host = self.host
else:
signature_host = '%s:%d' % (self.host, port)
return signature_host
def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
self.proxy = proxy
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
if 'http_proxy' in os.environ and not self.proxy:
pattern = re.compile(
'(?:http://)?' \
'(?:(?P<user>\w+):(?P<pass>.*)@)?' \
'(?P<host>[\w\-\.]+)' \
'(?::(?P<port>\d+))?'
)
match = pattern.match(os.environ['http_proxy'])
if match:
self.proxy = match.group('host')
self.proxy_port = match.group('port')
self.proxy_user = match.group('user')
self.proxy_pass = match.group('pass')
else:
if not self.proxy:
self.proxy = config.get_value('Boto', 'proxy', None)
if not self.proxy_port:
self.proxy_port = config.get_value('Boto', 'proxy_port', None)
if not self.proxy_user:
self.proxy_user = config.get_value('Boto', 'proxy_user', None)
if not self.proxy_pass:
self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
if not self.proxy_port and self.proxy:
print "http_proxy environment variable does not specify " \
"a port, using default"
self.proxy_port = self.port
self.use_proxy = (self.proxy != None)
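    # For illustration only: with a hypothetical environment setting of
    #
    #   http_proxy=http://user:secret@proxy.example.com:8080
    #
    # the pattern above yields proxy='proxy.example.com', proxy_port='8080',
    # proxy_user='user' and proxy_pass='secret' (the port is kept as a string
    # at this point).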
def get_http_connection(self, host, is_secure):
conn = self._pool.get_http_connection(host, is_secure)
if conn is not None:
return conn
else:
return self.new_http_connection(host, is_secure)
def new_http_connection(self, host, is_secure):
if self.use_proxy and not is_secure:
host = '%s:%d' % (self.proxy, int(self.proxy_port))
if host is None:
host = self.server_name()
if is_secure:
boto.log.debug(
'establishing HTTPS connection: host=%s, kwargs=%s',
host, self.http_connection_kwargs)
if self.use_proxy:
connection = self.proxy_ssl(host, is_secure and 443 or 80)
elif self.https_connection_factory:
connection = self.https_connection_factory(host)
elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
connection = https_connection.CertValidatingHTTPSConnection(
host, ca_certs=self.ca_certificates_file,
**self.http_connection_kwargs)
else:
connection = httplib.HTTPSConnection(host,
**self.http_connection_kwargs)
else:
boto.log.debug('establishing HTTP connection: kwargs=%s' %
self.http_connection_kwargs)
if self.https_connection_factory:
# even though the factory says https, this is too handy
# to not be able to allow overriding for http also.
connection = self.https_connection_factory(host,
**self.http_connection_kwargs)
else:
connection = httplib.HTTPConnection(host,
**self.http_connection_kwargs)
if self.debug > 1:
connection.set_debuglevel(self.debug)
# self.connection must be maintained for backwards-compatibility
# however, it must be dynamically pulled from the connection pool
# set a private variable which will enable that
if host.split(':')[0] == self.host and is_secure == self.is_secure:
self._connection = (host, is_secure)
# Set the response class of the http connection to use our custom
# class.
connection.response_class = HTTPResponse
return connection
def put_http_connection(self, host, is_secure, connection):
self._pool.put_http_connection(host, is_secure, connection)
def proxy_ssl(self, host=None, port=None):
if host and port:
host = '%s:%d' % (host, port)
else:
host = '%s:%d' % (self.host, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.proxy, int(self.proxy_port)))
if "timeout" in self.http_connection_kwargs:
sock.settimeout(self.http_connection_kwargs["timeout"])
except:
raise
boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
sock.sendall("User-Agent: %s\r\n" % UserAgent)
if self.proxy_user and self.proxy_pass:
for k, v in self.get_proxy_auth_header().items():
sock.sendall("%s: %s\r\n" % (k, v))
# See discussion about this config option at
# https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False):
sock.sendall("\r\n")
else:
sock.sendall("\r\n")
resp = httplib.HTTPResponse(sock, strict=True, debuglevel=self.debug)
resp.begin()
if resp.status != 200:
            # Fake a socket error, use a code that makes it obvious it hasn't
# been generated by the socket library
raise socket.error(-71,
"Error talking to HTTP proxy %s:%s: %s (%s)" %
(self.proxy, self.proxy_port,
resp.status, resp.reason))
# We can safely close the response, it duped the original socket
resp.close()
h = httplib.HTTPConnection(host)
if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
boto.log.debug("wrapping ssl socket for proxied connection; "
"CA certificate file=%s",
self.ca_certificates_file)
key_file = self.http_connection_kwargs.get('key_file', None)
cert_file = self.http_connection_kwargs.get('cert_file', None)
sslSock = ssl.wrap_socket(sock, keyfile=key_file,
certfile=cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certificates_file)
cert = sslSock.getpeercert()
            hostname = self.host.split(':')[0]
if not https_connection.ValidateCertificateHostname(cert, hostname):
raise https_connection.InvalidCertificateException(
hostname, cert, 'hostname mismatch')
else:
# Fallback for old Python without ssl.wrap_socket
if hasattr(httplib, 'ssl'):
sslSock = httplib.ssl.SSLSocket(sock)
else:
sslSock = socket.ssl(sock, None, None)
sslSock = httplib.FakeSocket(sock, sslSock)
# This is a bit unclean
h.sock = sslSock
return h
def prefix_proxy_to_path(self, path, host=None):
path = self.protocol + '://' + (host or self.server_name()) + path
return path
def get_proxy_auth_header(self):
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
Also handles redirects.
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!
"""
boto.log.debug('Method: %s' % request.method)
boto.log.debug('Path: %s' % request.path)
boto.log.debug('Data: %s' % request.body)
boto.log.debug('Headers: %s' % request.headers)
boto.log.debug('Host: %s' % request.host)
boto.log.debug('Params: %s' % request.params)
response = None
body = None
e = None
if override_num_retries is None:
num_retries = config.getint('Boto', 'num_retries', self.num_retries)
else:
num_retries = override_num_retries
i = 0
connection = self.get_http_connection(request.host, self.is_secure)
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests.
next_sleep = random.random() * (2 ** i)
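            # For example (hypothetical numbers): on attempt i=0 the sleep is
            # drawn from [0, 1) seconds, on attempt i=3 from [0, 8) seconds,
            # so concurrent clients spread their retries apart over time.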
try:
# we now re-sign each request before it is retried
boto.log.debug('Token: %s' % self.provider.security_token)
request.authorize(connection=self)
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
else:
connection.request(request.method, request.path,
request.body, request.headers)
response = connection.getresponse()
location = response.getheader('location')
# -- gross hack --
# httplib gets confused with chunked responses to HEAD requests
# so I have to fake it out
if request.method == 'HEAD' and getattr(response,
'chunked', False):
response.chunked = 0
if callable(retry_handler):
status = retry_handler(response, i, next_sleep)
if status:
msg, i, next_sleep = status
if msg:
boto.log.debug(msg)
time.sleep(next_sleep)
continue
if response.status == 500 or response.status == 503:
msg = 'Received %d response. ' % response.status
msg += 'Retrying in %3.1f seconds' % next_sleep
boto.log.debug(msg)
body = response.read()
elif response.status < 300 or response.status >= 400 or \
not location:
self.put_http_connection(request.host, self.is_secure,
connection)
return response
else:
scheme, request.host, request.path, \
params, query, fragment = urlparse.urlparse(location)
if query:
request.path += '?' + query
msg = 'Redirecting: %s' % scheme + '://'
msg += request.host + request.path
boto.log.debug(msg)
connection = self.get_http_connection(request.host,
scheme == 'https')
response = None
continue
except PleaseRetryException, e:
boto.log.debug('encountered a retry exception: %s' % e)
connection = self.new_http_connection(request.host,
self.is_secure)
response = e.response
except self.http_exceptions, e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
boto.log.debug(
'encountered unretryable %s exception, re-raising' %
e.__class__.__name__)
raise e
boto.log.debug('encountered %s exception, reconnecting' % \
e.__class__.__name__)
connection = self.new_http_connection(request.host,
self.is_secure)
time.sleep(next_sleep)
i += 1
# If we made it here, it's because we have exhausted our retries
        # and still haven't succeeded. So, if we have a response object,
# use it to raise an exception.
# Otherwise, raise the exception that must have already happened.
if response:
raise BotoServerError(response.status, response.reason, body)
elif e:
raise e
else:
msg = 'Please report this exception as a Boto Issue!'
raise BotoClientError(msg)
def build_base_http_request(self, method, path, auth_path,
params=None, headers=None, data='', host=None):
path = self.get_path(path)
if auth_path is not None:
auth_path = self.get_path(auth_path)
if params == None:
params = {}
else:
params = params.copy()
if headers == None:
headers = {}
else:
headers = headers.copy()
host = host or self.host
if self.use_proxy:
if not auth_path:
auth_path = path
path = self.prefix_proxy_to_path(path, host)
if self.proxy_user and self.proxy_pass and not self.is_secure:
# If is_secure, we don't have to set the proxy authentication
# header here, we did that in the CONNECT to the proxy.
headers.update(self.get_proxy_auth_header())
return HTTPRequest(method, self.protocol, host, self.port,
path, auth_path, params, headers, data)
def make_request(self, method, path, headers=None, data='', host=None,
auth_path=None, sender=None, override_num_retries=None,
params=None, retry_handler=None):
"""Makes a request to the server, with stock multiple-retry logic."""
if params is None:
params = {}
http_request = self.build_base_http_request(method, path, auth_path,
params, headers, data, host)
return self._mexe(http_request, sender, override_num_retries,
retry_handler=retry_handler)
def close(self):
"""(Optional) Close any open HTTP connections. This is non-destructive,
and making a new request will open a connection again."""
boto.log.debug('closing all HTTP connections')
self._connection = None # compat field
class AWSQueryConnection(AWSAuthConnection):
APIVersion = ''
ResponseError = BotoServerError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host=None, debug=0,
https_connection_factory=None, path='/', security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
debug, https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs)
def _required_auth_capability(self):
return []
def get_utf8_value(self, value):
return boto.utils.get_utf8_value(value)
def make_request(self, action, params=None, path='/', verb='GET'):
http_request = self.build_base_http_request(verb, path, None,
params, {}, '',
self.server_name())
if action:
http_request.params['Action'] = action
if self.APIVersion:
http_request.params['Version'] = self.APIVersion
return self._mexe(http_request)
def build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
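    # A small, hypothetical illustration of build_list_params():
    #
    #   params = {}
    #   conn.build_list_params(params, ['i-1', 'i-2'], 'InstanceId')
    #   # params == {'InstanceId.1': 'i-1', 'InstanceId.2': 'i-2'}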
def build_complex_list_params(self, params, items, label, names):
"""Serialize a list of structures.
For example::
items = [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')]
label = 'ParamName.member'
names = ('One', 'Two', 'Three')
self.build_complex_list_params(params, items, label, names)
would result in the params dict being updated with these params::
ParamName.member.1.One = foo
ParamName.member.1.Two = bar
ParamName.member.1.Three = baz
ParamName.member.2.One = foo2
ParamName.member.2.Two = bar2
ParamName.member.2.Three = baz2
:type params: dict
:param params: The params dict. The complex list params
will be added to this dict.
:type items: list of tuples
:param items: The list to serialize.
:type label: string
:param label: The prefix to apply to the parameter.
:type names: tuple of strings
:param names: The names associated with each tuple element.
"""
for i, item in enumerate(items, 1):
current_prefix = '%s.%s' % (label, i)
for key, value in zip(names, item):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
# generics
def get_list(self, action, params, markers, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet(markers)
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_object(self, action, params, cls, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
obj = cls(parent)
h = boto.handler.XmlHandler(obj, parent)
xml.sax.parseString(body, h)
return obj
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_status(self, action, params, path='/', parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet()
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs.status
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
| mit |
psavery/avogadro | libavogadro/src/python/unittest/camera.py | 9 | 2353 | from PyQt4.Qt import *
from numpy import *
import Avogadro
import sys
import unittest
from util import *
class TestCamera(unittest.TestCase):
def setUp(self):
# create the GLWidget and load the default engines
self.glwidget = Avogadro.GLWidget()
self.glwidget.loadDefaultEngines()
self.molecule = Avogadro.molecules.addMolecule()
self.molecule.addAtom()
self.glwidget.molecule = self.molecule
self.assertNotEqual(self.glwidget.camera, None)
  def tearDown(self):
    # nothing to tear down explicitly; each test builds its own GLWidget in setUp
    pass
def test_parent(self):
self.assertNotEqual(self.glwidget.camera.parent, None)
def test_angleOfViewY(self):
self.assert_(self.glwidget.camera.angleOfViewY)
testReadWriteProperty(self, self.glwidget.camera.angleOfViewY, 40.0, 60.0)
def test_modelview(self):
self.glwidget.camera.modelview
m = self.glwidget.camera.modelview
self.glwidget.camera.modelview = m
def test_various(self):
self.glwidget.camera.applyPerspective()
self.glwidget.camera.applyModelview()
self.glwidget.camera.initializeViewPoint()
dist = self.glwidget.camera.distance(array([0., 0., 0.]))
self.glwidget.camera.translate(array([0., 0., 0.]))
self.glwidget.camera.pretranslate(array([0., 0., 0.]))
self.glwidget.camera.rotate(3.14, array([0., 0., 0.]))
self.glwidget.camera.prerotate(3.14, array([0., 0., 0.]))
self.glwidget.camera.normalize()
def test_axes(self):
self.glwidget.camera.transformedXAxis
self.glwidget.camera.transformedYAxis
self.glwidget.camera.transformedZAxis
self.glwidget.camera.backTransformedXAxis
self.glwidget.camera.backTransformedYAxis
self.glwidget.camera.backTransformedZAxis
def test_project(self):
point = QPoint(10,20)
self.assertEqual(len(self.glwidget.camera.unProject(point)), 3)
self.assertEqual(len(self.glwidget.camera.unProject(point, array([1., 0., 0.]))), 3)
# added to fix name conflict WithZ
self.assertEqual(len(self.glwidget.camera.unProjectWithZ(array([1., 2., 0.]))), 3)
self.assertEqual(len(self.glwidget.camera.project(array([1., 2., 3.]))), 3)
if __name__ == "__main__":
# create a new application
# (must be done before creating a GLWidget)
app = QApplication(sys.argv)
unittest.main()
sys.exit(app.exec_())
| gpl-2.0 |
google/grr | grr/server/grr_response_server/flows/general/registry.py | 1 | 6530 | #!/usr/bin/env python
"""Gather information from the registry on windows."""
from grr_response_core import config
from grr_response_core.lib import artifact_utils
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_core.path_detection import windows as path_detection_windows
from grr_response_proto import flows_pb2
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server.flows.general import collectors
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import transfer
class RegistryFinderCondition(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.RegistryFinderCondition
rdf_deps = [
rdf_file_finder.FileFinderContentsLiteralMatchCondition,
rdf_file_finder.FileFinderContentsRegexMatchCondition,
rdf_file_finder.FileFinderModificationTimeCondition,
rdf_file_finder.FileFinderSizeCondition,
]
class RegistryFinderArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.RegistryFinderArgs
rdf_deps = [
rdf_paths.GlobExpression,
RegistryFinderCondition,
]
def _ConditionsToFileFinderConditions(conditions):
"""Converts FileFinderSizeConditions to RegistryFinderConditions."""
ff_condition_type_cls = rdf_file_finder.FileFinderCondition.Type
result = []
for c in conditions:
if c.condition_type == RegistryFinderCondition.Type.MODIFICATION_TIME:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.MODIFICATION_TIME,
modification_time=c.modification_time))
elif c.condition_type == RegistryFinderCondition.Type.VALUE_LITERAL_MATCH:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.CONTENTS_LITERAL_MATCH,
contents_literal_match=c.value_literal_match))
elif c.condition_type == RegistryFinderCondition.Type.VALUE_REGEX_MATCH:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.CONTENTS_REGEX_MATCH,
contents_regex_match=c.value_regex_match))
elif c.condition_type == RegistryFinderCondition.Type.SIZE:
result.append(
rdf_file_finder.FileFinderCondition(
condition_type=ff_condition_type_cls.SIZE, size=c.size))
else:
raise ValueError("Unknown condition type: %s" % c.condition_type)
return result
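# To illustrate the mapping above (hypothetical input): a RegistryFinderCondition
# of type VALUE_REGEX_MATCH becomes a FileFinderCondition of type
# CONTENTS_REGEX_MATCH carrying the same regex-match payload, while an
# unrecognised condition type raises ValueError.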
class RegistryFinder(flow_base.FlowBase):
"""This flow looks for registry items matching given criteria."""
friendly_name = "Registry Finder"
category = "/Registry/"
args_type = RegistryFinderArgs
behaviours = flow_base.BEHAVIOUR_BASIC
@classmethod
def GetDefaultArgs(cls, username=None):
del username
return cls.args_type(keys_paths=[
"HKEY_USERS/%%users.sid%%/Software/"
"Microsoft/Windows/CurrentVersion/Run/*"
])
def Start(self):
self.CallFlow(
compatibility.GetName(file_finder.FileFinder),
paths=self.args.keys_paths,
pathtype=rdf_paths.PathSpec.PathType.REGISTRY,
conditions=_ConditionsToFileFinderConditions(self.args.conditions),
action=rdf_file_finder.FileFinderAction.Stat(),
next_state=compatibility.GetName(self.Done))
def Done(self, responses):
if not responses.success:
raise flow_base.FlowError("Registry search failed %s" % responses.status)
for response in responses:
self.SendReply(response)
class ClientRegistryFinder(flow_base.FlowBase):
"""This flow looks for registry items matching given criteria."""
friendly_name = "Client Side Registry Finder"
category = "/Registry/"
args_type = RegistryFinderArgs
behaviours = flow_base.BEHAVIOUR_DEBUG
@classmethod
def GetDefaultArgs(cls, username=None):
del username
return cls.args_type(
keys_paths=["HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows NT/*"])
def Start(self):
self.CallFlow(
compatibility.GetName(file_finder.ClientFileFinder),
paths=self.args.keys_paths,
pathtype=rdf_paths.PathSpec.PathType.REGISTRY,
conditions=_ConditionsToFileFinderConditions(self.args.conditions),
action=rdf_file_finder.FileFinderAction.Stat(),
next_state=compatibility.GetName(self.Done))
def Done(self, responses):
if not responses.success:
raise flow_base.FlowError("Registry search failed %s" % responses.status)
for response in responses:
self.SendReply(response)
class CollectRunKeyBinaries(flow_base.FlowBase):
"""Collect the binaries used by Run and RunOnce keys on the system.
We use the RunKeys artifact to get RunKey command strings for all users and
System. This flow guesses file paths from the strings, expands any
windows system environment variables, and attempts to retrieve the files.
"""
category = "/Registry/"
behaviours = flow_base.BEHAVIOUR_BASIC
def Start(self):
"""Get runkeys via the ArtifactCollectorFlow."""
self.CallFlow(
collectors.ArtifactCollectorFlow.__name__,
artifact_list=["WindowsRunKeys"],
use_raw_filesystem_access=True,
next_state=compatibility.GetName(self.ParseRunKeys))
def ParseRunKeys(self, responses):
"""Get filenames from the RunKeys and download the files."""
filenames = []
client = data_store.REL_DB.ReadClientSnapshot(self.client_id)
kb = client.knowledge_base
for response in responses:
runkey = response.registry_data.string
environ_vars = artifact_utils.GetWindowsEnvironmentVariablesMap(kb)
path_guesses = path_detection_windows.DetectExecutablePaths([runkey],
environ_vars)
if not path_guesses:
self.Log("Couldn't guess path for %s", runkey)
for path in path_guesses:
filenames.append(
rdf_paths.PathSpec(
path=path,
pathtype=config.CONFIG["Server.raw_filesystem_access_pathtype"])
)
if filenames:
self.CallFlow(
transfer.MultiGetFile.__name__,
pathspecs=filenames,
next_state=compatibility.GetName(self.Done))
def Done(self, responses):
for response in responses:
self.SendReply(response)
| apache-2.0 |
Galexrt/zulip | zerver/views/pointer.py | 5 | 1267 |
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from typing import Text
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.models import UserProfile, UserMessage
def get_pointer_backend(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
| apache-2.0 |
arrabito/DIRAC | Core/Utilities/DErrno.py | 1 | 11716 | """ :mod: DErrno
==========================
.. module: DErrno
:synopsis: Error list and utilities for handling errors in DIRAC
This module contains list of errors that can be encountered in DIRAC.
It complements the errno module of python.
It also contains utilities to manipulate these errors.
Finally, it contains a DErrno class that contains an error number
as well as a low level error message. It behaves like a string for
compatibility reasons
In order to add extension specific error, you need to create in your extension the file
Core/Utilities/DErrno.py, which will contain the following dictionary:
* extra_dErrName: keys are the error name, values the number of it
* extra_dErrorCode: same as dErrorCode. keys are the error code, values the name
                          (we don't simply invert the previous dict in case we do not
have a one to one mapping)
* extra_dStrError: same as dStrError, Keys are the error code, values the error description
* extra_compatErrorString: same as compatErrorString. The compatible error strings are
added to the existing one, and not replacing them.
Example of extension file :
* extra_dErrName = { 'ELHCBSPE' : 3001 }
* extra_dErrorCode = { 3001 : 'ELHCBSPE'}
* extra_dStrError = { 3001 : "This is a description text of the specific LHCb error" }
* extra_compatErrorString = { 3001 : ["living easy, living free"],
DErrno.ERRX : ['An error message for ERRX that is specific to LHCb']}
"""
import os
import imp
import sys
# pylint: disable=bad-continuation
# To avoid conflict, the error numbers should be greater than 1000
# We decided to group the by range of 100 per system
# 1000: Generic
# 1100: Core
# 1200: Framework
# 1300: Interfaces
# 1400: Config
# 1500: WMS / Workflow
# 1600: DMS/StorageManagement
# 1700: RMS
# 1800: Accounting
# 1900: TS
# 2000: Resources and RSS
# ## Generic (10XX)
# Python related: 0X
ETYPE = 1000
EIMPERR = 1001
ENOMETH = 1002
ECONF = 1003
EVALUE = 1004
EEEXCEPTION = 1005
# Files manipulation: 1X
ECTMPF = 1010
EOF = 1011
ERF = 1012
EWF = 1013
ESPF = 1014
# ## Core (11XX)
# Certificates and Proxy: 0X
EX509 = 1100
EPROXYFIND = 1101
EPROXYREAD = 1102
ECERTFIND = 1103
ECERTREAD = 1104
ENOCERT = 1105
ENOCHAIN = 1106
ENOPKEY = 1107
ENOGROUP = 1108
# DISET: 1X
EDISET = 1110
ENOAUTH = 1111
# 3rd party security: 2X
E3RDPARTY = 1120
EVOMS = 1121
# Databases : 3X
EDB = 1130
EMYSQL = 1131
ESQLA = 1132
# Message Queues: 4X
EMQUKN = 1140
EMQNOM = 1141
EMQCONN = 1142
# Elasticsearch
EELNOFOUND = 1146
# Config
ESECTION = 1400
# Processes
EEZOMBIE = 1147
EENOPID = 1148
# ## WMS/Workflow
EWMSUKN = 1500
EWMSJDL = 1501
EWMSRESC = 1502
EWMSSUBM = 1503
# ## DMS/StorageManagement (16XX)
EFILESIZE = 1601
EGFAL = 1602
EBADCKS = 1603
EFCERR = 1604
# ## RMS (17XX)
ERMSUKN = 1700
# ## TS (19XX)
ETSUKN = 1900
ETSDATA = 1901
# ## Resources and RSS (20XX)
ERESGEN = 2000
ERESUNA = 2001
ERESUNK = 2002
# This translates the integer number into the name of the variable
dErrorCode = {
# ## Generic (10XX)
# 100X: Python related
1000 : 'ETYPE',
1001 : 'EIMPERR',
1002 : 'ENOMETH',
1003 : 'ECONF',
1004 : 'EVALUE',
1005 : 'EEEXCEPTION',
# 101X: Files manipulation
1010 : 'ECTMPF',
1011 : 'EOF',
1012 : 'ERF',
1013 : 'EWF',
1014 : 'ESPF',
# ## Core
# 110X: Certificates and Proxy
1100 : 'EX509',
1101 : 'EPROXYFIND',
1102 : 'EPROXYREAD',
1103 : 'ECERTFIND',
1104 : 'ECERTREAD',
1105 : 'ENOCERT',
1106 : 'ENOCHAIN',
1107 : 'ENOPKEY',
1108 : 'ENOGROUP',
# 111X: DISET
1110 : 'EDISET',
1111 : 'ENOAUTH',
# 112X: 3rd party security
1120 : 'E3RDPARTY',
1121 : 'EVOMS',
# 113X: Databases
1130 : 'EDB',
1131 : 'EMYSQL',
1132 : 'ESQLA',
# 114X: Message Queues
1140 : 'EMQUKN',
1141 : 'EMQNOM',
1142 : 'EMQCONN',
# Elasticsearch
1146 : 'EELNOFOUND',
# Config
1400 : "ESECTION",
  # Processes
1147 : 'EEZOMBIE',
1148 : 'EENOPID',
# WMS/Workflow
1500 : 'EWMSUKN',
1501 : 'EWMSJDL',
1502 : 'EWMSRESC',
1503: 'EWMSSUBM',
# DMS/StorageManagement
1601 : 'EFILESIZE',
1602 : 'EGFAL',
1603 : 'EBADCKS',
1604 : "EFCERR",
# RMS
1700 : 'ERMSUKN',
# Resources and RSS
2000 : 'ERESGEN',
2001 : 'ERESUNA',
2002 : 'ERESUNK',
# TS
1900 : "ETSUKN",
1901 : "ETSDATA"}
dStrError = {
# ## Generic (10XX)
# 100X: Python related
ETYPE : "Object Type Error",
EIMPERR : "Failed to import library",
ENOMETH : "No such method or function",
ECONF : "Configuration error",
EVALUE: "Wrong value passed",
EEEXCEPTION: "runtime general exception",
# 101X: Files manipulation
ECTMPF : "Failed to create temporary file",
EOF : "Cannot open file",
ERF : "Cannot read from file",
EWF : "Cannot write to file",
ESPF : "Cannot set permissions to file",
# ## Core
# 110X: Certificates and Proxy
EX509 : "Generic Error with X509",
EPROXYFIND : "Can't find proxy",
EPROXYREAD : "Can't read proxy",
ECERTFIND : "Can't find certificate",
ECERTREAD : "Can't read certificate",
ENOCERT : "No certificate loaded",
ENOCHAIN : "No chain loaded",
ENOPKEY : "No private key loaded",
ENOGROUP: "No DIRAC group",
# 111X: DISET
EDISET : "DISET Error",
ENOAUTH : "Unauthorized query",
# 112X: 3rd party security
E3RDPARTY: "3rd party security service error",
EVOMS : "VOMS Error",
# 113X: Databases
EDB : "Database Error",
EMYSQL : "MySQL Error",
ESQLA : "SQLAlchemy Error",
# 114X: Message Queues
EMQUKN : "Unknown MQ Error",
EMQNOM : "No messages",
EMQCONN : "MQ connection failure",
# 114X Elasticsearch
EELNOFOUND: "Index not found",
# Config
ESECTION : "Section is not found",
  # Processes
EEZOMBIE: "Zombie process",
EENOPID: "No PID of process",
# WMS/Workflow
EWMSUKN : "Unknown WMS error",
EWMSJDL : "Invalid job description",
EWMSRESC: "Job to reschedule",
EWMSSUBM: "Job submission error",
# DMS/StorageManagement
EFILESIZE : "Bad file size",
EGFAL : "Error with the gfal call",
EBADCKS : "Bad checksum",
EFCERR : "FileCatalog error",
# RMS
ERMSUKN : "Unknown RMS error",
# Resources and RSS
ERESGEN: "Unknown Resource Failure",
ERESUNA: "Resource not available",
ERESUNK: "Unknown Resource",
# TS
ETSUKN : "Unknown Transformation System Error",
ETSDATA : "Invalid Input Data definition"}
def strerror(code):
""" This method wraps up os.strerror, and behave the same way.
It completes it with the DIRAC specific errors.
"""
  if code == 0:
return "Undefined error"
errMsg = "Unknown error %s" % code
try:
errMsg = dStrError[code]
except KeyError:
# It is not a DIRAC specific error, try the os one
try:
errMsg = os.strerror( code )
# On some system, os.strerror raises an exception with unknown code,
# on others, it returns a message...
except ValueError:
pass
return errMsg
def cmpError( inErr, candidate ):
""" This function compares an error (in its old form (a string or dictionary) or in its int form
with a candidate error code.
:param inErr: a string, an integer, a S_ERROR dictionary
:type inErr: str or int or S_ERROR
:param int candidate: error code to compare with
:return: True or False
If an S_ERROR instance is passed, we compare the code with S_ERROR['Errno']
If it is a Integer, we do a direct comparison
If it is a String, we use strerror to check the error string
"""
if isinstance(inErr, basestring): # old style
# Compare error message strings
errMsg = strerror(candidate)
return errMsg in inErr
elif isinstance(inErr, dict): # if the S_ERROR structure is given
# Check if Errno defined in the dict
errorNumber = inErr.get('Errno')
if errorNumber:
return errorNumber == candidate
errMsg = strerror(candidate)
return errMsg in inErr.get('Message', '')
elif isinstance(inErr, int):
return inErr == candidate
else:
raise TypeError("Unknown input error type %s" % type(inErr))
def includeExtensionErrors():
""" Merge all the errors of all the extensions into the errors of these modules
Should be called only at the initialization of DIRAC, so by the parseCommandLine,
dirac-agent.py, dirac-service.py, dirac-executor.py
"""
def __recurseImport( modName, parentModule = None, fullName = False ):
""" Internal function to load modules
"""
if isinstance( modName, basestring ):
modName = modName.split( "." )
if not fullName:
fullName = ".".join( modName )
try:
if parentModule:
impData = imp.find_module( modName[0], parentModule.__path__ )
else:
impData = imp.find_module( modName[0] )
impModule = imp.load_module( modName[0], *impData )
if impData[0]:
impData[0].close()
except ImportError:
return None
if len( modName ) == 1:
return impModule
return __recurseImport( modName[1:], impModule, fullName = fullName )
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
allExtensions = CSGlobals.getCSExtensions()
for extension in allExtensions:
ext_derrno = None
try:
ext_derrno = __recurseImport( '%sDIRAC.Core.Utilities.DErrno' % extension )
if ext_derrno:
# The next 3 dictionary MUST be present for consistency
# Global name of errors
sys.modules[__name__].__dict__.update( ext_derrno.extra_dErrName )
# Dictionary with the error codes
sys.modules[__name__].dErrorCode.update( ext_derrno.extra_dErrorCode )
# Error description string
sys.modules[__name__].dStrError.update( ext_derrno.extra_dStrError )
# extra_compatErrorString is optional
for err in getattr( ext_derrno, 'extra_compatErrorString', [] ) :
sys.modules[__name__].compatErrorString.setdefault( err, [] ).extend( ext_derrno.extra_compatErrorString[err] )
except:
pass
| gpl-3.0 |
JaviMerino/trappy | trappy/thermal.py | 2 | 9812 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the power allocator trace in the current
directory's trace.dat"""
from collections import OrderedDict
import pandas as pd
import re
from trappy.base import Base
from trappy.dynamic import register_ftrace_parser
class Thermal(Base):
"""Process the thermal framework data in a FTrace dump"""
unique_word = "thermal_temperature:"
"""The unique word that will be matched in a trace line"""
name = "thermal"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "id"
"""The Pivot along which the data is orthogonal"""
def plot_temperature(self, control_temperature=None, title="", width=None,
height=None, ylim="range", ax=None, legend_label=""):
"""Plot the temperature.
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param legend_label: Label for the legend
:type legend_label: str
:param title: The title of the plot
:type title: str
:param control_temperature: If control_temp is a
:mod:`pd.Series` representing the (possible)
variation of :code:`control_temp` during the
run, draw it using a dashed yellow line.
Otherwise, only the temperature is plotted.
:type control_temperature: :mod:`pandas.Series`
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
"""
from matplotlib import pyplot as plt
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
title = normalize_title("Temperature", title)
if len(self.data_frame) == 0:
raise ValueError("Empty DataFrame")
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(self.data_frame["temp"] / 1000).plot(ax=ax, label=temp_label)
if control_temperature is not None:
ct_label = normalize_title("Control", legend_label)
control_temperature.plot(ax=ax, color="y", linestyle="--",
label=ct_label)
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
def plot_temperature_hist(self, ax, title):
"""Plot a temperature histogram
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
"""
from trappy.plot_utils import normalize_title, plot_hist
temps = self.data_frame["temp"] / 1000
title = normalize_title("Temperature", title)
xlim = (0, temps.max())
plot_hist(temps, ax, title, "C", 30, "Temperature", xlim, "default")
register_ftrace_parser(Thermal, "thermal")
class ThermalGovernor(Base):
"""Process the power allocator data in a ftrace dump"""
unique_word = "thermal_power_allocator:"
"""The unique word that will be matched in a trace line"""
name = "thermal_governor"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "thermal_zone_id"
"""The Pivot along which the data is orthogonal"""
def plot_temperature(self, title="", width=None, height=None, ylim="range",
ax=None, legend_label=""):
"""Plot the temperature"""
from matplotlib import pyplot as plt
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
curr_temp = dfr["current_temperature"]
control_temp_series = (curr_temp + dfr["delta_temperature"]) / 1000
title = normalize_title("Temperature", title)
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(curr_temp / 1000).plot(ax=ax, label=temp_label)
control_temp_series.plot(ax=ax, color="y", linestyle="--",
label="control temperature")
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
def plot_input_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot input power
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
:param actor_order: An array showing the order in which the actors
were registered. The array values are the labels that
will be used in the input and output power plots.
For Example:
::
["GPU", "A15", "A7"]
:type actor_order: list
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match("req_power[0-9]+", s)]
plot_dfr = dfr[in_cols]
# Rename the columns from "req_power0" to "A15" or whatever is
# in actor_order. Note that we can do it just with an
# assignment because the columns are already sorted (i.e.:
# req_power0, req_power1...)
plot_dfr.columns = actor_order
title = normalize_title("Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_weighted_input_power(self, actor_weights, title="", width=None,
height=None, ax=None):
"""Plot weighted input power
:param actor_weights: An array of tuples. First element of the
tuple is the name of the actor, the second is the weight. The
array is in the same order as the :code:`req_power` appear in the
trace.
:type actor_weights: list
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match(r"req_power\d+", s)]
plot_dfr_dict = OrderedDict()
for in_col, (name, weight) in zip(in_cols, actor_weights):
plot_dfr_dict[name] = dfr[in_col] * weight / 1024
plot_dfr = pd.DataFrame(plot_dfr_dict)
title = normalize_title("Weighted Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
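    # As a hypothetical example of the 1024-based weighting above: an actor
    # registered as ("A15", 512) contributes dfr["req_powerN"] * 512 / 1024,
    # i.e. half of its requested power, to the plotted series.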
def plot_output_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot output power
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
:param actor_order: An array showing the order in which the actors
were registered. The array values are the labels that
will be used in the input and output power plots.
For Example:
::
["GPU", "A15", "A7"]
:type actor_order: list
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
out_cols = [s for s in self.data_frame.columns
if re.match("granted_power[0-9]+", s)]
# See the note in plot_input_power()
plot_dfr = self.data_frame[out_cols]
plot_dfr.columns = actor_order
title = normalize_title("Output Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_inout_power(self, title=""):
"""Make multiple plots showing input and output power for each actor
:param title: The title of the plot
:type title: str
"""
from trappy.plot_utils import normalize_title
dfr = self.data_frame
actors = []
for col in dfr.columns:
match = re.match("P(.*)_in", col)
if match and col != "Ptot_in":
actors.append(match.group(1))
for actor in actors:
cols = ["P" + actor + "_in", "P" + actor + "_out"]
this_title = normalize_title(actor, title)
dfr[cols].plot(title=this_title)
register_ftrace_parser(ThermalGovernor, "thermal")
| apache-2.0 |
Godiyos/python-for-android | python-modules/twisted/twisted/internet/pollreactor.py | 56 | 6856 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A poll() based implementation of the twisted main loop.
To install the event loop (and you should do this before any connections,
listeners or connectors are added)::
from twisted.internet import pollreactor
pollreactor.install()
"""
# System imports
import errno, sys
from select import error as SelectError, poll
from select import POLLIN, POLLOUT, POLLHUP, POLLERR, POLLNVAL
from zope.interface import implements
# Twisted imports
from twisted.python import log
from twisted.internet import main, posixbase, error
from twisted.internet.interfaces import IReactorFDSet
POLL_DISCONNECTED = (POLLHUP | POLLERR | POLLNVAL)
class PollReactor(posixbase.PosixReactorBase):
"""
A reactor that uses poll(2).
@ivar _poller: A L{poll} which will be used to check for I/O
readiness.
@ivar _selectables: A dictionary mapping integer file descriptors to
instances of L{FileDescriptor} which have been registered with the
reactor. All L{FileDescriptors} which are currently receiving read or
write readiness notifications will be present as values in this
dictionary.
@ivar _reads: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for read readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
@ivar _writes: A dictionary mapping integer file descriptors to arbitrary
values (this is essentially a set). Keys in this dictionary will be
registered with C{_poller} for write readiness notifications which will
be dispatched to the corresponding L{FileDescriptor} instances in
C{_selectables}.
"""
implements(IReactorFDSet)
def __init__(self):
"""
Initialize polling object, file descriptor tracking dictionaries, and
the base class.
"""
self._poller = poll()
self._selectables = {}
self._reads = {}
self._writes = {}
posixbase.PosixReactorBase.__init__(self)
def _updateRegistration(self, fd):
"""Register/unregister an fd with the poller."""
try:
self._poller.unregister(fd)
except KeyError:
pass
mask = 0
if fd in self._reads:
mask = mask | POLLIN
if fd in self._writes:
mask = mask | POLLOUT
if mask != 0:
self._poller.register(fd, mask)
else:
if fd in self._selectables:
del self._selectables[fd]
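    # For instance (hypothetical fd numbers): if fd 7 appears in both _reads
    # and _writes, it is registered with POLLIN | POLLOUT; if it appears in
    # neither, it is unregistered and dropped from _selectables.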
def _dictRemove(self, selectable, mdict):
try:
# the easy way
fd = selectable.fileno()
# make sure the fd is actually real. In some situations we can get
# -1 here.
mdict[fd]
except:
# the hard way: necessary because fileno() may disappear at any
# moment, thanks to python's underlying sockets impl
for fd, fdes in self._selectables.items():
if selectable is fdes:
break
else:
# Hmm, maybe not the right course of action? This method can't
# fail, because it happens inside error detection...
return
if fd in mdict:
del mdict[fd]
self._updateRegistration(fd)
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read.
"""
fd = reader.fileno()
if fd not in self._reads:
self._selectables[fd] = reader
self._reads[fd] = 1
self._updateRegistration(fd)
def addWriter(self, writer):
"""Add a FileDescriptor for notification of data available to write.
"""
fd = writer.fileno()
if fd not in self._writes:
self._selectables[fd] = writer
self._writes[fd] = 1
self._updateRegistration(fd)
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
return self._dictRemove(reader, self._reads)
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
return self._dictRemove(writer, self._writes)
def removeAll(self):
"""
Remove all selectables, and return a list of them.
"""
return self._removeAll(
[self._selectables[fd] for fd in self._reads],
[self._selectables[fd] for fd in self._writes])
def doPoll(self, timeout):
"""Poll the poller for new events."""
if timeout is not None:
timeout = int(timeout * 1000) # convert seconds to milliseconds
try:
l = self._poller.poll(timeout)
except SelectError, e:
if e[0] == errno.EINTR:
return
else:
raise
_drdw = self._doReadOrWrite
for fd, event in l:
try:
selectable = self._selectables[fd]
except KeyError:
# Handles the infrequent case where one selectable's
# handler disconnects another.
continue
log.callWithLogger(selectable, _drdw, selectable, fd, event)
doIteration = doPoll
def _doReadOrWrite(self, selectable, fd, event):
why = None
inRead = False
if event & POLL_DISCONNECTED and not (event & POLLIN):
if fd in self._reads:
why = main.CONNECTION_DONE
inRead = True
else:
why = main.CONNECTION_LOST
else:
try:
if event & POLLIN:
why = selectable.doRead()
inRead = True
if not why and event & POLLOUT:
why = selectable.doWrite()
inRead = False
if not selectable.fileno() == fd:
why = error.ConnectionFdescWentAway('Filedescriptor went away')
inRead = False
except:
log.deferr()
why = sys.exc_info()[1]
if why:
self._disconnectSelectable(selectable, why, inRead)
def getReaders(self):
return [self._selectables[fd] for fd in self._reads]
def getWriters(self):
return [self._selectables[fd] for fd in self._writes]
def install():
"""Install the poll() reactor."""
p = PollReactor()
from twisted.internet.main import installReactor
installReactor(p)
__all__ = ["PollReactor", "install"]
| apache-2.0 |
shengqh/ngsperl | lib/Visualization/plotGene.py | 1 | 7060 | import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
cqsdir = os.path.abspath(os.path.dirname(currentdir) + "/CQS")
sys.path.insert(0,cqsdir)
import logging
import argparse
import string
import subprocess
from LocusItem import LocusItem, readBedFile
from FileListUtils import readUniqueHashMap
def main():
DEBUG = False
NOT_DEBUG = not DEBUG
parser = argparse.ArgumentParser(description="Draw bam plot based on peak list.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', required=NOT_DEBUG, help="Input bed file")
parser.add_argument('-b', '--bamListFile', action='store', nargs='?', required=NOT_DEBUG, help="Sample bam file list")
parser.add_argument('-s', '--sizeFactorFile', action='store', nargs='?', required=NOT_DEBUG, help="Sample chromosome size factor file")
parser.add_argument('-e', '--extend_bases', action='store', type=int, default=0, nargs='?', help="Extending X bases before and after coordinates")
parser.add_argument('-g', '--plot_gene', action='store_true', help="Plot hg38 gene track")
parser.add_argument('-o', '--output', action='store', nargs='?', required=NOT_DEBUG, help="Output folder")
args = parser.parse_args()
if(DEBUG):
# args.input = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/20190504_smallRNA_as_chipseq__fileList1.list"
# args.groupsFile = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/20190504_smallRNA_as_chipseq__fileList2.list"
# args.bamListFile = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/20190504_smallRNA_as_chipseq__fileList3.list"
# args.output = "/scratch/cqs/shengq2/vickers/20190504_smallRNA_as_chipseq_GCF_000005845.2_ASM584v2/plotPeak/result/Control.pdf"
args.input = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/annotation_genes_locus/result/linton_exomeseq_3321.bed"
args.bamListFile = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/GATK4_CNV_Germline_8_PlotGeneCNV/result/linton_exomeseq_3321__fileList3.list"
args.output = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/GATK4_CNV_Germline_8_PlotGeneCNV/result/linton_exomeseq_3321.position.txt"
args.sizeFactorFile = "/scratch/cqs/shengq2/macrae_linton/20190517_linton_exomeseq_3321_human/background/linton_exomeseq_3321.excluded.bed.sizefactor"
logger = logging.getLogger('plotGene')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
print(args)
bamMap = readUniqueHashMap(args.bamListFile)
sampleNames = sorted(bamMap.keys())
sampleFiles = [bamMap[sampleName] for sampleName in sampleNames]
outputFolder = os.path.dirname(args.output)
bedFile = args.input
logger.info("processing " + bedFile + "...")
bedResultFile = args.output
bamListFile = bedResultFile + ".bam.list"
with open(bamListFile, "w") as flist:
for sampleFile in sampleFiles:
flist.write(sampleFile + "\n")
chrMap = {}
with open(args.sizeFactorFile, "rt") as fin:
for line in fin:
parts = line.rstrip().split('\t')
chrom = parts[0]
chromKey = chrom.replace("chr","")
chrMap[chromKey] = chrom
chrMap[chrom] = chrom
#print(chrMap)
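  # As an illustration (hypothetical size-factor line): a row whose first
  # column is "chr1" makes both chrMap["1"] and chrMap["chr1"] resolve to
  # "chr1", so loci written with or without the "chr" prefix map onto the
  # BAM-style chromosome name.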
bedResultTmpFile = bedResultFile + ".tmp"
with open(bedResultTmpFile, "wt") as fout:
fout.write("File\tFeature\tLocus\tPosition\tPositionCount\tMaxCount\tPercentage\n")
posData = []
locusList = readBedFile(bedFile)
for locus in locusList:
locus.Chromosome = chrMap[locus.Chromosome]
locusName = locus.getName()
locusString = locus.getLocusString(args.extend_bases)
logger.info(" processing " + locus.getLocusString() + " ...")
locusData = []
locusData.append([]) #add position from depth
for sampleName in sampleNames:
locusData.append([])
posData.append([locus, locusData])
proc = subprocess.Popen(["samtools", "depth", "-f", bamListFile, "-r", locusString, "-d", "0"], stdout=subprocess.PIPE)
for pline in proc.stdout:
pparts = pline.rstrip().decode("utf-8").split("\t")
position = int(pparts[1])
locusData[0].append(position)
for idx in range(len(sampleNames)):
locusData[idx+1].append(int(pparts[idx+2]))
positions = locusData[0]
for idx in range(len(sampleNames)):
sampleCount = locusData[idx+1]
if len(sampleCount) == 0:
maxCount = 0
else:
maxCount = max(sampleCount)
if maxCount == 0:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, locus.Start, 0, 0, 0))
continue
lastZero = True
lastPosition = positions[0] - 1
for cIdx in range(len(positions)):
curPosition = positions[cIdx]
if curPosition != lastPosition + 1:
if not lastZero:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, lastPosition + 1, 0, maxCount, 0))
lastZero = True
if sampleCount[cIdx] != 0:
if lastZero:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[cIdx] - 1, 0, maxCount, 0))
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[cIdx], sampleCount[cIdx], maxCount, sampleCount[cIdx] * 1.0 / maxCount))
lastZero = False
else:
if not lastZero:
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[cIdx], 0, maxCount, 0))
lastZero = True
lastPosition = curPosition
fout.write("%s\t%s\t%s\t%d\t%d\t%d\t%lf\n" % (sampleNames[idx], locusName, locusString, positions[len(positions)-1] + 1, 0, maxCount, 0))
if os.path.exists(bedResultFile):
os.remove(bedResultFile)
os.remove(bamListFile)
os.rename(bedResultTmpFile, bedResultFile)
realpath = os.path.dirname(os.path.realpath(__file__))
#rPath = realpath + "/plotGeneHuman.r" if args.plot_gene else realpath + "/plotGene.r"
#plotGeneHuman is still under development
rPath = realpath + "/plotGene.r" if args.plot_gene else realpath + "/plotGene.r"
targetR = bedResultFile + ".r"
with open(targetR, "wt") as fout:
fout.write("inputFile<-\"%s\"\n" % bedResultFile)
fout.write("outputPrefix<-\"%s\"\n" % bedResultFile)
fout.write("sizeFactorFile<-\"%s\"\n\n" % args.sizeFactorFile)
with open(rPath, "r") as fin:
for line in fin:
line = line.rstrip()
fout.write(line + "\n")
cmd = "R --vanilla -f " + targetR
logger.info(cmd)
os.system(cmd)
logger.info("done.")
main()
| apache-2.0 |
faircloth-lab/uce-probe-design | run_lastz.py | 1 | 6837 | #!/usr/bin/env python
# encoding: utf-8
"""
run_lastz.py
Created by Brant Faircloth on 2010-02-24.
Copyright (c) 2010 Brant Faircloth. All rights reserved.
# Description
A helper script to run lastz.
"""
import pdb
import sys
import os
import time
import optparse
import tempfile
import subprocess
import bx.seq.twobit
import multiprocessing
def interface():
'''Get the starting parameters from a configuration file'''
usage = "usage: %prog [options]"
p = optparse.OptionParser(usage)
p.add_option('--target', dest = 'target', action='store', \
type='string', default = None, help='The path to the target file (2bit)', \
metavar='FILE')
p.add_option('--query', dest = 'query', action='store', \
type='string', default = None, help='The path to the query file (2bit)', \
metavar='FILE')
p.add_option('--output', dest = 'output', action='store', \
type='string', default = None, help='The path to the output file', \
metavar='FILE')
p.add_option('--nprocs', dest = 'nprocs', action='store', \
type='int', default = 1, help='The number of processors to use')
p.add_option('--huge', dest = 'huge', action='store_true', default=False, \
help='Deal with poorly assembled (many scaffolds) genome sequences')
p.add_option('--size', dest = 'size', action='store', \
type='int', default = 10000000, help='The chunk size (in bp) to stick in a \
file while using the --huge option')
(options,arg) = p.parse_args()
for f in [options.target, options.query, options.output]:
if not f:
p.print_help()
sys.exit(2)
if f != options.output and not os.path.isfile(f):
print "You must provide a valid path to the query/target file."
p.print_help()
sys.exit(2)
return options, arg
def q_runner(n_procs, list_item, function, *args):
'''generic function used to start worker processes'''
task_queue = multiprocessing.Queue()
results_queue = multiprocessing.JoinableQueue()
if args:
arguments = (task_queue, results_queue,) + args
else:
arguments = (task_queue, results_queue,)
results = []
    # reduce processor count if proc count > files
if len(list_item) < n_procs:
n_procs = len(list_item)
for l in list_item:
task_queue.put(l)
for _ in range(n_procs):
p = multiprocessing.Process(target=function, args=arguments).start()
#print 'Starting %s' % function
for _ in range(len(list_item)):
# indicated done results processing
results.append(results_queue.get())
results_queue.task_done()
#tell child processes to stop
for _ in range(n_procs):
task_queue.put('STOP')
# join the queue until we're finished processing results
results_queue.join()
# not closing the Queues caused me untold heartache and suffering
task_queue.close()
results_queue.close()
return results
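# Illustrative call pattern for q_runner (file names are hypothetical),
# mirroring how main() uses it below:
#
#   pairs = zip(['chr1.2bit', 'chr2.2bit'], ['probes.2bit'] * 2)
#   outputs = q_runner(2, pairs, lastz)
#
# Each worker consumes (chromo, probe) tuples from the task queue until it
# sees the 'STOP' sentinel, and puts the temporary lastz output filename (or
# the lastz stderr text on failure) onto the results queue.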
def lastzParams(query, target, temp_out):
cli = \
'lastz {0}[nameparse=full] {1}[nameparse=full]\
--hspthresh=3000 \
--gappedthresh=3000 \
--ydrop=9400 \
--inner=0 \
--gap=400,30 \
--output={2} \
--format=lav'.format(query, target, temp_out)
return cli
def lastz(input, output):
'''worker: run lastz on (chromo, probe) tuples pulled from the task queue'''
for chromo, probe in iter(input.get, 'STOP'):
print '\t%s' % chromo
temp_fd, temp_out = tempfile.mkstemp(suffix='.lastz')
os.close(temp_fd)
cli = lastzParams(chromo, probe, temp_out)
lzstdout, lztstderr = subprocess.Popen(cli, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate(None)
if lztstderr:
output.put(lztstderr)
else:
output.put(temp_out)
def SingleProcLastz(input, output):
'''single-process variant: run lastz for one (chromo, probe) tuple'''
#pdb.set_trace()
chromo, probe = input
temp_fd, temp_out = tempfile.mkstemp(suffix='.lastz')
os.close(temp_fd)
cli = lastzParams(chromo, probe, temp_out)
#pdb.set_trace()
lzstdout, lztstderr = subprocess.Popen(cli, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate(None)
if lztstderr:
output.append(lztstderr)
else:
output.append(temp_out)
return output
def main():
start_time = time.time()
print 'Started: ', time.strftime("%a %b %d, %Y %H:%M:%S", time.localtime(start_time))
options, arg = interface()
if not options.huge:
# get individual records from the 2bit file
chromos = [os.path.join(options.target, c) for c in bx.seq.twobit.TwoBitFile(file(options.target)).keys()]
else:
chromos = []
# split target file into `options.size` (~10 Mbp) chunks
temp_fd, temp_out = tempfile.mkstemp(suffix='.fasta')
os.close(temp_fd)
temp_out_handle = open(temp_out, 'w')
tb = bx.seq.twobit.TwoBitFile(file(options.target))
sequence_length = 0
print 'Running with the --huge option. Chunking files into {0} bp...'.format(options.size)
for seq in tb.keys():
sequence = tb[seq][0:]
sequence_length += len(sequence)
# write it to the outfile
temp_out_handle.write('>{0}\n{1}\n'.format(seq, sequence))
if sequence_length > options.size:
temp_out_handle.close()
# put tempfile name on stack
chromos.append(temp_out + '[multiple]')
# open a new temp file
temp_fd, temp_out = tempfile.mkstemp(suffix='.fasta')
os.close(temp_fd)
temp_out_handle = open(temp_out, 'w')
# reset sequence length
sequence_length = 0
probes = (options.query,) * len(chromos)
cp = zip(chromos, probes)
# put those record names on the stack
print "Running the targets against %s queries..." % len(chromos)
if options.nprocs == 1:
results = []
for each in cp:
print each
print results
results = SingleProcLastz(each, results)
else:
results = q_runner(options.nprocs, cp, lastz)
outp = open(options.output, 'wb')
print "Writing the results file..."
#pdb.set_trace()
for f in results:
print '\t%s' % f
# read the file
outp.write(open(f, 'rb').read())
# cleanup the lastz output files
os.remove(f)
outp.close()
print 'Cleaning up the chunked files...'
if options.huge:
for f in chromos:
# cleanup the chunked files
os.remove(f.strip('[multiple]'))
# stats
end_time = time.time()
print 'Ended: ', time.strftime("%a %b %d, %Y %H:%M:%S", time.localtime(end_time))
print 'Time for execution: ', (end_time - start_time) / 60, 'minutes'
if __name__ == '__main__':
main()
| bsd-3-clause |
CingHu/neutron-ustack | neutron/plugins/cisco/cfg_agent/device_drivers/devicedriver_api.py | 5 | 5750 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hareesh Puthalath, Cisco Systems, Inc.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class RoutingDriverBase(object):
"""Base class that defines an abstract interface for the Routing Driver.
This class defines the abstract interface/API for the Routing and
NAT related operations. Driver class corresponding to a hosting device
should inherit this base driver and implement its methods.
RouterInfo object (neutron.plugins.cisco.cfg_agent.router_info.RouterInfo)
is a wrapper around the router dictionary, with attributes for easy access
to parameters.
"""
@abc.abstractmethod
def router_added(self, router_info):
"""A logical router was assigned to the hosting device.
:param router_info: RouterInfo object for this router
:return None
"""
pass
@abc.abstractmethod
def router_removed(self, router_info):
"""A logical router was de-assigned from the hosting device.
:param router_info: RouterInfo object for this router
:return None
"""
pass
@abc.abstractmethod
def internal_network_added(self, router_info, port):
"""An internal network was connected to a router.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network is connected
:return None
"""
pass
@abc.abstractmethod
def internal_network_removed(self, router_info, port):
"""An internal network was removed from a router.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network was connected
:return None
"""
pass
@abc.abstractmethod
def external_gateway_added(self, router_info, ex_gw_port):
"""An external network was added to a router.
:param router_info: RouterInfo object of the router
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:return None
"""
pass
@abc.abstractmethod
def external_gateway_removed(self, router_info, ex_gw_port):
"""An external network was removed from the router.
:param router_info: RouterInfo object of the router
:param ex_gw_port : port dictionary for the port where the external
gateway network was connected
:return None
"""
pass
@abc.abstractmethod
def enable_internal_network_NAT(self, router_info, port, ex_gw_port):
"""Enable NAT on an internal network.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network is connected
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:return None
"""
pass
@abc.abstractmethod
def disable_internal_network_NAT(self, router_info, port, ex_gw_port):
"""Disable NAT on an internal network.
:param router_info: RouterInfo object for this router
:param port : port dictionary for the port where the internal
network is connected
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:return None
"""
pass
@abc.abstractmethod
def floating_ip_added(self, router_info, ex_gw_port,
floating_ip, fixed_ip):
"""A floating IP was added.
:param router_info: RouterInfo object for this router
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:param floating_ip: Floating IP as a string
:param fixed_ip : Fixed IP of the internal interface as
a string
:return None
"""
pass
@abc.abstractmethod
def floating_ip_removed(self, router_info, ex_gw_port,
floating_ip, fixed_ip):
"""A floating IP was removed.
:param router_info: RouterInfo object for this router
:param ex_gw_port : port dictionary for the port where the external
gateway network is connected
:param floating_ip: Floating IP as a string
:param fixed_ip: Fixed IP of the internal interface as a string
:return None
"""
pass
@abc.abstractmethod
def routes_updated(self, router_info, action, route):
"""Routes were updated for router.
:param router_info: RouterInfo object for this router
:param action : Action on the route , either 'replace' or 'delete'
:param route: route dictionary with keys 'destination' & 'next_hop'
:return None
"""
pass
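# Illustrative sketch (hypothetical driver, not part of this API): a concrete
# device driver subclasses RoutingDriverBase and implements every abstract
# method above before it can be instantiated, e.g.
#
#   class NoopRoutingDriver(RoutingDriverBase):
#       def router_added(self, router_info):
#           pass
#       def router_removed(self, router_info):
#           pass
#       # ... internal_network_added/_removed, external_gateway_added/_removed,
#       # enable/disable_internal_network_NAT, floating_ip_added/_removed and
#       # routes_updated must be implemented too; otherwise instantiation
#       # raises TypeError because of the abc.ABCMeta metaclass.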
| apache-2.0 |
wavesoft/CCLib | Python/cc_write_flash.py | 1 | 2943 | #!/usr/bin/python
#
# CCLib_proxy Utilities
# Copyright (c) 2014 Ioannis Charalampidis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
from cclib import CCHEXFile, getOptions, openCCDebugger
import sys
# Get serial port either form environment or from arguments
opts = getOptions("Generic CCDebugger Flash Writer Tool", hexIn=True,
erase="Full chip erase before write", offset=":Offset the addresses in the .hex file by this value")
# Open debugger
try:
dbg = openCCDebugger(opts['port'], enterDebug=opts['enter'])
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(1)
# Get offset
offset = 0
if opts['offset']:
if opts['offset'][0:2] == "0x":
offset = int(opts['offset'], 16)
else:
offset = int(opts['offset'])
print("NOTE: The memory addresses are offset by %i bytes!" % offset)
# Get bluegiga-specific info
serial = dbg.getSerial()
# Parse the HEX file
hexFile = CCHEXFile( opts['in'] )
hexFile.load()
# Display sections & calculate max memory usage
maxMem = 0
print("Sections in %s:\n" % opts['in'])
print(" Addr. Size")
print("-------- -------------")
for mb in hexFile.memBlocks:
# Calculate top position
memTop = mb.addr + mb.size
if memTop > maxMem:
maxMem = memTop
# Print portion
print(" 0x%04x %i B " % (mb.addr + offset, mb.size))
print("")
# Check for oversize data
if maxMem > (dbg.chipInfo['flash'] * 1024):
print("ERROR: Data too bit to fit in chip's memory!")
sys.exit(4)
# Confirm
erasePrompt = "OVERWRITE"
if opts['erase']:
erasePrompt = "ERASE and REPROGRAM"
print("This is going to %s the chip. Are you sure? <y/N>: " % erasePrompt, end=' ')
ans = sys.stdin.readline()[0:-1]
if (ans != "y") and (ans != "Y"):
print("Aborted")
sys.exit(2)
# Flashing messages
print("\nFlashing:")
# Send chip erase
if opts['erase']:
print(" - Chip erase...")
try:
dbg.chipErase()
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(3)
# Flash memory
dbg.pauseDMA(False)
print(" - Flashing %i memory blocks..." % len(hexFile.memBlocks))
for mb in hexFile.memBlocks:
# Flash memory block
print(" -> 0x%04x : %i bytes " % (mb.addr + offset, mb.size))
try:
dbg.writeCODE( mb.addr + offset, mb.bytes, verify=True, showProgress=True )
except Exception as e:
print("ERROR: %s" % str(e))
sys.exit(3)
# Done
print("\nCompleted")
print("")
| gpl-3.0 |
orioncoin-dev/orioncoin | contrib/testgen/gen_base58_test_vectors.py | 1064 | 4344 | #!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
valid = False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| mit |
DepthDeluxe/ansible | lib/ansible/plugins/action/copy.py | 18 | 15161 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import stat
import tempfile
from ansible.constants import mk_boolean as boolean
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum
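# Illustrative playbook task handled by this action plugin (paths and values
# are hypothetical):
#
#   - copy:
#       src: files/app.conf
#       dest: /etc/app/app.conf
#       mode: preserve
#       follow: yes
#
# 'mode: preserve' and 'follow' are resolved here on the controller before the
# copy/file modules are executed on the target host.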
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
source = self._task.args.get('src', None)
content = self._task.args.get('content', None)
dest = self._task.args.get('dest', None)
raw = boolean(self._task.args.get('raw', 'no'))
force = boolean(self._task.args.get('force', 'yes'))
remote_src = boolean(self._task.args.get('remote_src', False))
follow = boolean(self._task.args.get('follow', False))
decrypt = boolean(self._task.args.get('decrypt', True))
result['failed'] = True
if (source is None and content is None) or dest is None:
result['msg'] = "src (or content) and dest are required"
elif source is not None and content is not None:
result['msg'] = "src and content are mutually exclusive"
elif content is not None and dest is not None and dest.endswith("/"):
result['msg'] = "dest must be a file if content is defined"
else:
del result['failed']
if result.get('failed'):
return result
# Check if the source ends with a "/"
source_trailing_slash = False
if source:
source_trailing_slash = self._connection._shell.path_has_trailing_slash(source)
# Define content_tempfile in case we set it after finding content populated.
content_tempfile = None
# If content is defined make a temp file and write the content into it.
if content is not None:
try:
# If content comes to us as a dict it should be decoded json.
# We need to encode it back into a string to write it out.
if isinstance(content, dict) or isinstance(content, list):
content_tempfile = self._create_content_tempfile(json.dumps(content))
else:
content_tempfile = self._create_content_tempfile(content)
source = content_tempfile
except Exception as err:
result['failed'] = True
result['msg'] = "could not write content temp file: %s" % to_native(err)
return result
# if we have first_available_file in our vars
# look up the files and use the first one we find as src
elif remote_src:
result.update(self._execute_module(task_vars=task_vars))
return result
else: # find in expected paths
try:
source = self._find_needle('files', source)
except AnsibleError as e:
result['failed'] = True
result['msg'] = to_text(e)
return result
# A list of source file tuples (full_path, relative_path) which will try to copy to the destination
source_files = []
# If source is a directory populate our list else source is a file and translate it to a tuple.
if os.path.isdir(to_bytes(source, errors='surrogate_or_strict')):
# Get the amount of spaces to remove to get the relative path.
if source_trailing_slash:
sz = len(source)
else:
sz = len(source.rsplit('/', 1)[0]) + 1
# Walk the directory and append the file tuples to source_files.
for base_path, sub_folders, files in os.walk(to_bytes(source), followlinks=True):
for file in files:
full_path = to_text(os.path.join(base_path, file), errors='surrogate_or_strict')
rel_path = full_path[sz:]
if rel_path.startswith('/'):
rel_path = rel_path[1:]
source_files.append((full_path, rel_path))
# If it's recursive copy, destination is always a dir,
# explicitly mark it so (note - copy module relies on this).
if not self._connection._shell.path_has_trailing_slash(dest):
dest = self._connection._shell.join_path(dest, '')
else:
source_files.append((source, os.path.basename(source)))
changed = False
module_return = dict(changed=False)
# A register for if we executed a module.
# Used to cut down on command calls when not recursive.
module_executed = False
# Tell _execute_module to delete the file if there is one file.
delete_remote_tmp = (len(source_files) == 1)
# If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
if not delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
# expand any user home dir specifier
dest = self._remote_expand_user(dest)
# Keep original value for mode parameter
mode_value = self._task.args.get('mode', None)
diffs = []
for source_full, source_rel in source_files:
# If the local file does not exist, get_real_file() raises AnsibleFileNotFound
try:
source_full = self._loader.get_real_file(source_full, decrypt=decrypt)
except AnsibleFileNotFound as e:
result['failed'] = True
result['msg'] = "could not find src=%s, %s" % (source_full, e)
self._remove_tmp_path(tmp)
return result
# Get the local mode and set if user wanted it preserved
# https://github.com/ansible/ansible-modules-core/issues/1124
if self._task.args.get('mode', None) == 'preserve':
lmode = '0%03o' % stat.S_IMODE(os.stat(source_full).st_mode)
self._task.args['mode'] = lmode
# This is kind of optimization - if user told us destination is
# dir, do path manipulation right away, otherwise we still check
# for dest being a dir via remote call below.
if self._connection._shell.path_has_trailing_slash(dest):
dest_file = self._connection._shell.join_path(dest, source_rel)
else:
dest_file = self._connection._shell.join_path(dest)
# Attempt to get remote file info
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp, checksum=force)
if dest_status['exists'] and dest_status['isdir']:
# The dest is a directory.
if content is not None:
# If source was defined as content remove the temporary file and fail out.
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._remove_tmp_path(tmp)
result['failed'] = True
result['msg'] = "can not use content with a dir as dest"
return result
else:
# Append the relative source location to the destination and get remote stats again
dest_file = self._connection._shell.join_path(dest, source_rel)
dest_status = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=follow, tmp=tmp, checksum=force)
if dest_status['exists'] and not force:
# remote_file exists so continue to next iteration.
continue
# Generate a hash of the local file.
local_checksum = checksum(source_full)
if local_checksum != dest_status['checksum']:
# The checksums don't match and we will change or error out.
changed = True
# Create a tmp path if missing only if this is not recursive.
# If this is recursive we already have a tmp path.
if delete_remote_tmp:
if tmp is None or "-tmp-" not in tmp:
tmp = self._make_tmp_path()
if self._play_context.diff and not raw:
diffs.append(self._get_diff_data(dest_file, source_full, task_vars))
if self._play_context.check_mode:
self._remove_tempfile_if_content_defined(content, content_tempfile)
changed = True
module_return = dict(changed=True)
continue
# Define a remote directory that we will copy the file to.
tmp_src = self._connection._shell.join_path(tmp, 'source')
remote_path = None
if not raw:
remote_path = self._transfer_file(source_full, tmp_src)
else:
self._transfer_file(source_full, dest_file)
# We have copied the file remotely and no longer require our content_tempfile
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
# fix file permissions when the copy is done as a different user
if remote_path:
self._fixup_perms2((tmp, remote_path))
if raw:
# Continue to next iteration if raw is defined.
continue
# Run the copy module
# src and dest here come after original and override them
# we pass dest only to make sure it includes trailing slash in case of recursive copy
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=tmp_src,
dest=dest,
original_basename=source_rel,
)
)
# remove action plugin only keys
for key in ('content', 'decrypt'):
if key in new_module_args:
del new_module_args[key]
module_return = self._execute_module(module_name='copy',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
module_executed = True
else:
# no need to transfer the file, already correct hash, but still need to call
# the file module in case we want to change attributes
self._remove_tempfile_if_content_defined(content, content_tempfile)
self._loader.cleanup_tmp_file(source_full)
if raw:
# Continue to next iteration if raw is defined.
self._remove_tmp_path(tmp)
continue
# Fix for https://github.com/ansible/ansible-modules-core/issues/1568.
# If checksums match, and follow = True, find out if 'dest' is a link. If so,
# change it to point to the source of the link.
if follow:
dest_status_nofollow = self._execute_remote_stat(dest_file, all_vars=task_vars, follow=False)
if dest_status_nofollow['islnk'] and 'lnk_source' in dest_status_nofollow.keys():
dest = dest_status_nofollow['lnk_source']
# Build temporary module_args.
new_module_args = self._task.args.copy()
new_module_args.update(
dict(
src=source_rel,
dest=dest,
original_basename=source_rel
)
)
# Execute the file module.
module_return = self._execute_module(module_name='file',
module_args=new_module_args, task_vars=task_vars,
tmp=tmp, delete_remote_tmp=delete_remote_tmp)
module_executed = True
if not module_return.get('checksum'):
module_return['checksum'] = local_checksum
if module_return.get('failed'):
result.update(module_return)
if not delete_remote_tmp:
self._remove_tmp_path(tmp)
return result
if module_return.get('changed'):
changed = True
# the file module returns the file path as 'path', but
# the copy module uses 'dest', so add it if it's not there
if 'path' in module_return and 'dest' not in module_return:
module_return['dest'] = module_return['path']
# reset the mode
self._task.args['mode'] = mode_value
# Delete tmp path if we were recursive or if we did not execute a module.
if not delete_remote_tmp or (delete_remote_tmp and not module_executed):
self._remove_tmp_path(tmp)
if module_executed and len(source_files) == 1:
result.update(module_return)
else:
result.update(dict(dest=dest, src=source, changed=changed))
if diffs:
result['diff'] = diffs
return result
def _create_content_tempfile(self, content):
''' Create a tempfile containing defined content '''
fd, content_tempfile = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
content = to_bytes(content)
try:
f.write(content)
except Exception as err:
os.remove(content_tempfile)
raise Exception(err)
finally:
f.close()
return content_tempfile
def _remove_tempfile_if_content_defined(self, content, content_tempfile):
if content is not None:
os.remove(content_tempfile)
| gpl-3.0 |
lowitty/server | libsDarwin/twisted/trial/_dist/test/test_disttrial.py | 10 | 13156 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.disttrial}.
"""
import os
import sys
from cStringIO import StringIO
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import fail, succeed
from twisted.internet.task import Cooperator, deferLater
from twisted.internet.main import CONNECTION_DONE
from twisted.internet import reactor
from twisted.python.failure import Failure
from twisted.python.lockfile import FilesystemLock
from twisted.test.test_cooperator import FakeScheduler
from twisted.trial.unittest import TestCase
from twisted.trial.reporter import Reporter, TreeReporter
from twisted.trial.reporter import UncleanWarningsReporterWrapper
from twisted.trial.runner import TrialSuite, ErrorHolder
from twisted.trial._dist.disttrial import DistTrialRunner
from twisted.trial._dist.distreporter import DistReporter
from twisted.trial._dist.worker import LocalWorker
class FakeTransport(object):
"""
A simple fake process transport.
"""
def writeToChild(self, fd, data):
"""
Ignore write calls.
"""
class FakeReactor(object):
"""
A simple fake reactor for testing purposes.
"""
spawnCount = 0
stopCount = 0
runCount = 0
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
def stop(self):
self.stopCount += 1
def run(self):
self.runCount += 1
def addSystemEventTrigger(self, *args, **kw):
pass
class EternalTerminationPredicateFactory(object):
"""
A rigged terminationPredicateFactory for which time never passes.
"""
def __call__(self):
"""
See: L{task._Timer}
"""
return False
class DistTrialRunnerTests(TestCase):
"""
Tests for L{DistTrialRunner}.
"""
def setUp(self):
"""
Create a runner for testing.
"""
self.runner = DistTrialRunner(TreeReporter, 4, [],
workingDirectory=self.mktemp())
self.runner._stream = StringIO()
def getFakeSchedulerAndEternalCooperator(self):
"""
Helper to create fake scheduler and cooperator in tests.
The cooperator has a termination timer which will never inform
the scheduler that the task needs to be terminated.
@return: L{tuple} of (scheduler, cooperator)
"""
scheduler = FakeScheduler()
cooperator = Cooperator(
scheduler=scheduler,
terminationPredicateFactory=EternalTerminationPredicateFactory,
)
return scheduler, cooperator
def test_writeResults(self):
"""
L{DistTrialRunner.writeResults} writes to the stream specified in the
init.
"""
stringIO = StringIO()
result = DistReporter(Reporter(stringIO))
self.runner.writeResults(result)
self.assertTrue(stringIO.tell() > 0)
def test_createLocalWorkers(self):
"""
C{createLocalWorkers} iterates the list of protocols and create one
L{LocalWorker} for each.
"""
protocols = [object() for x in xrange(4)]
workers = self.runner.createLocalWorkers(protocols, "path")
for s in workers:
self.assertIsInstance(s, LocalWorker)
self.assertEqual(4, len(workers))
def test_launchWorkerProcesses(self):
"""
Given a C{spawnProcess} function, C{launchWorkerProcess} launches a
python process with a existing path as its argument.
"""
protocols = [ProcessProtocol() for i in range(4)]
arguments = []
environment = {}
def fakeSpawnProcess(processProtocol, executable, args=(), env={},
path=None, uid=None, gid=None, usePTY=0,
childFDs=None):
arguments.append(executable)
arguments.extend(args)
environment.update(env)
self.runner.launchWorkerProcesses(
fakeSpawnProcess, protocols, ["foo"])
self.assertEqual(arguments[0], arguments[1])
self.assertTrue(os.path.exists(arguments[2]))
self.assertEqual("foo", arguments[3])
self.assertEqual(os.pathsep.join(sys.path),
environment["TRIAL_PYTHONPATH"])
def test_run(self):
"""
C{run} starts the reactor exactly once and spawns each of the workers
exactly once.
"""
fakeReactor = FakeReactor()
suite = TrialSuite()
for i in xrange(10):
suite.addTest(TestCase())
self.runner.run(suite, fakeReactor)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, self.runner._workerNumber)
def test_runUsedDirectory(self):
"""
L{DistTrialRunner} checks if the test directory is already locked, and
if it is generates a name based on it.
"""
class FakeReactorWithLock(FakeReactor):
def spawnProcess(oself, worker, *args, **kwargs):
self.assertEqual(os.path.abspath(worker._logDirectory),
os.path.abspath(
os.path.join(workingDirectory + "-1",
str(oself.spawnCount))))
localLock = FilesystemLock(workingDirectory + "-1.lock")
self.assertFalse(localLock.lock())
oself.spawnCount += 1
worker.makeConnection(FakeTransport())
worker._ampProtocol.run = lambda *args: succeed(None)
newDirectory = self.mktemp()
os.mkdir(newDirectory)
workingDirectory = os.path.join(newDirectory, "_trial_temp")
lock = FilesystemLock(workingDirectory + ".lock")
lock.lock()
self.addCleanup(lock.unlock)
self.runner._workingDirectory = workingDirectory
fakeReactor = FakeReactorWithLock()
suite = TrialSuite()
for i in xrange(10):
suite.addTest(TestCase())
self.runner.run(suite, fakeReactor)
def test_minimalWorker(self):
"""
L{DistTrialRunner} doesn't try to start more workers than the number of
tests.
"""
fakeReactor = FakeReactor()
self.runner.run(TestCase(), fakeReactor)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, 1)
def test_runUncleanWarnings(self):
"""
Running with the C{unclean-warnings} option makes L{DistTrialRunner}
uses the L{UncleanWarningsReporterWrapper}.
"""
fakeReactor = FakeReactor()
self.runner._uncleanWarnings = True
result = self.runner.run(TestCase(), fakeReactor)
self.assertIsInstance(result, DistReporter)
self.assertIsInstance(result.original,
UncleanWarningsReporterWrapper)
def test_runWithoutTest(self):
"""
When the suite contains no test, L{DistTrialRunner} takes a shortcut
path without launching any process or starting the reactor.
"""
fakeReactor = object()
suite = TrialSuite()
result = self.runner.run(suite, fakeReactor)
self.assertIsInstance(result, DistReporter)
output = self.runner._stream.getvalue()
self.assertIn("Running 0 test", output)
self.assertIn("PASSED", output)
def test_runWithoutTestButWithAnError(self):
"""
Even if there is no test, the suite can contain an error (most likely,
an import error): this should make the run fail, and the error should
be printed.
"""
fakeReactor = object()
error = ErrorHolder("an error", Failure(RuntimeError("foo bar")))
result = self.runner.run(error, fakeReactor)
self.assertIsInstance(result, DistReporter)
output = self.runner._stream.getvalue()
self.assertIn("Running 0 test", output)
self.assertIn("foo bar", output)
self.assertIn("an error", output)
self.assertIn("errors=1", output)
self.assertIn("FAILED", output)
def test_runUnexpectedError(self):
"""
If for some reason we can't connect to the worker process, the test
suite catches the error and fails.
"""
class FakeReactorWithFail(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.failingRun
def failingRun(self, case, result):
return fail(RuntimeError("oops"))
scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()
fakeReactor = FakeReactorWithFail()
result = self.runner.run(TestCase(), fakeReactor,
cooperator.cooperate)
self.assertEqual(fakeReactor.runCount, 1)
self.assertEqual(fakeReactor.spawnCount, 1)
scheduler.pump()
self.assertEqual(1, len(result.original.failures))
def test_runStopAfterTests(self):
"""
L{DistTrialRunner} calls C{reactor.stop} and unlocks the test directory
once the tests have run.
"""
functions = []
class FakeReactorWithSuccess(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.succeedingRun
def succeedingRun(self, case, result):
return succeed(None)
def addSystemEventTrigger(oself, phase, event, function):
self.assertEqual('before', phase)
self.assertEqual('shutdown', event)
functions.append(function)
workingDirectory = self.runner._workingDirectory
fakeReactor = FakeReactorWithSuccess()
self.runner.run(TestCase(), fakeReactor)
def check():
localLock = FilesystemLock(workingDirectory + ".lock")
self.assertTrue(localLock.lock())
self.assertEqual(1, fakeReactor.stopCount)
# We don't wait for the process deferreds here, so nothing is
# returned by the function before shutdown
self.assertIdentical(None, functions[0]())
return deferLater(reactor, 0, check)
def test_runWaitForProcessesDeferreds(self):
"""
L{DistTrialRunner} waits for the worker processes to stop when the
reactor is stopping, and then unlocks the test directory, not trying to
stop the reactor again.
"""
functions = []
workers = []
class FakeReactorWithEvent(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
workers.append(worker)
def addSystemEventTrigger(oself, phase, event, function):
self.assertEqual('before', phase)
self.assertEqual('shutdown', event)
functions.append(function)
workingDirectory = self.runner._workingDirectory
fakeReactor = FakeReactorWithEvent()
self.runner.run(TestCase(), fakeReactor)
def check(ign):
# Let the AMP deferreds fire
return deferLater(reactor, 0, realCheck)
def realCheck():
localLock = FilesystemLock(workingDirectory + ".lock")
self.assertTrue(localLock.lock())
# Stop is not called, as it ought to have been called before
self.assertEqual(0, fakeReactor.stopCount)
workers[0].processEnded(Failure(CONNECTION_DONE))
return functions[0]().addCallback(check)
def test_runUntilFailure(self):
"""
L{DistTrialRunner} can run in C{untilFailure} mode where it will run
the given tests until they fail.
"""
called = []
class FakeReactorWithSuccess(FakeReactor):
def spawnProcess(self, worker, *args, **kwargs):
worker.makeConnection(FakeTransport())
self.spawnCount += 1
worker._ampProtocol.run = self.succeedingRun
def succeedingRun(self, case, result):
called.append(None)
if len(called) == 5:
return fail(RuntimeError("oops"))
return succeed(None)
fakeReactor = FakeReactorWithSuccess()
scheduler, cooperator = self.getFakeSchedulerAndEternalCooperator()
result = self.runner.run(
TestCase(), fakeReactor, cooperate=cooperator.cooperate,
untilFailure=True)
scheduler.pump()
self.assertEqual(5, len(called))
self.assertFalse(result.wasSuccessful())
output = self.runner._stream.getvalue()
self.assertIn("PASSED", output)
self.assertIn("FAIL", output)
| mit |
rxuriguera/bibtexIndexMaker | src/bibim/references/format/formatter.py | 1 | 1717 |
# Copyright 2010 Ramon Xuriguera
#
# This file is part of BibtexIndexMaker.
#
# BibtexIndexMaker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BibtexIndexMaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BibtexIndexMaker. If not, see <http://www.gnu.org/licenses/>.
class ReferenceFormatter(object):
def __init__(self):
pass
def format_reference(self, reference, format_generator):
"""
Sets the 'entry' attribute of 'reference'
"""
format_generator.setup_new_reference()
format_generator.generate_header()
fields = reference.get_fields()
for field in fields:
field = reference.get_field(field)
if not field.value:
continue
generate_method = 'generate_' + field.name
try:
generate_method = getattr(format_generator, generate_method)
generate_method(field.value)
except AttributeError:
format_generator.generate_default(field.name, field.value)
format_generator.generate_footer()
reference.entry = format_generator.get_generated_reference()
reference.format = format_generator.format
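# Illustrative shape of a format generator (hypothetical sketch, not part of
# this module): format_reference() dispatches to per-field hooks by name.
#
#   class PlainTextGenerator(object):
#       format = 'plain'
#       def setup_new_reference(self): ...
#       def generate_header(self): ...
#       def generate_title(self, value): ...          # found via getattr()
#       def generate_default(self, name, value): ...  # fallback for other fields
#       def generate_footer(self): ...
#       def get_generated_reference(self): ...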
| gpl-3.0 |
bgilbert/scanvark | scanvark/config.py | 1 | 1818 | #
# Scanvark -- a Gtk-based batch scanning program
#
# Copyright (c) 2012 Benjamin Gilbert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division
import yaml
class ScanvarkConfig(object):
def __init__(self, conffile):
with open(conffile) as fh:
config = yaml.safe_load(fh)
self.device = config['device']
self.device_config = config.get('scan-settings', {})
self.source_single = config.get('single-source', None)
self.source_double = config.get('double-source', None)
self.prepend_new_pages = config.get('page-order') == 'reverse'
def get_rotation(key):
val = config.get('rotate', 0)
return config.get(key, val)
self.rotate_odd = get_rotation('rotate-odd')
self.rotate_even = get_rotation('rotate-even')
self.jpeg_quality = config.get('jpeg-quality', 95)
self.thumbnail_size = config.get('thumbnail-size', (200, 150))
defaults = config.get('defaults', {})
self.default_color = defaults.get('color', True)
self.default_double_sided = defaults.get('double-sided', False)
self.default_resolution = defaults.get('resolution', 150)
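# Illustrative YAML config (values are hypothetical; the keys mirror the
# lookups above):
#
#   device: "fujitsu:ScanSnap"
#   single-source: "ADF Front"
#   double-source: "ADF Duplex"
#   page-order: reverse
#   rotate: 0
#   jpeg-quality: 95
#   thumbnail-size: [200, 150]
#   defaults:
#     color: true
#     double-sided: false
#     resolution: 150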
| gpl-2.0 |
LingJiJian/LangTransUtil | lk.py | 1 | 4280 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# author B-y [email protected]
import os
import re
import json
import sys
class Singleton(type):
"""docstring for Singleton"""
def __init__(self, name,bases,dic):
super(Singleton, self).__init__(name,bases,dic)
self.instance = None
def __call__(self,*args,**kwargs):
if self.instance is None:
self.instance = super(Singleton,self).__call__(*args,**kwargs)
return self.instance
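# Illustrative behaviour (sketch): any class declaring `__metaclass__ = Singleton`
# hands back the same instance on every call, e.g.
#
#   fm1 = FileManager()
#   fm2 = FileManager()
#   assert fm1 is fm2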
class FileManager:
""" 文件管理器 """
__metaclass__ = Singleton
def __init__(self):
super(FileManager, self).__init__()
self.__scanFilePaths = {} # file contents, keyed by file path
self.__tranWordArrs = {} # matched strings, grouped by file
self.__tranWordDic = {} # matched strings, keyed by generated i18n key
def setInDir(self,path):
self.__inDir = path
def setOutDir(self,path):
self.__outDir = path
def setLogCallFuc(self,func):
self.__logCallFunc = func
def run(self):
self.__preload()
self.__scanInDir(self.__inDir)
self.__progressFiles()
self.__exportFile()
# preload configuration
def __preload(self):
path = sys.path[0]
if os.path.isfile(path):
path = os.path.dirname(path)
pFile = open(os.path.join(path,"config.json"),"r")
self.__config = json.loads(pFile.read())
keyArr = []
valArr = []
self._tmpkeyValFlag = False
if os.path.exists(self.__outDir):
def onHandle(tmpStr):
if self._tmpkeyValFlag:
valArr.append(tmpStr);
else:
keyArr.append(tmpStr);
self._tmpkeyValFlag = not self._tmpkeyValFlag;
pFile = open(self.__outDir,"r")
self.__scanWordInContent(pFile.read(),onHandle)
for i,v in enumerate(keyArr):
self.__tranWordDic[ keyArr[i] ] = valArr[i]
else:
self.__tranWordDic = {}
# scan the input directory
def __scanInDir(self,path):
arr = os.listdir(path)
for line in arr:
if self.__isIgnoreScan(line):
pass
else:
filepath = os.path.join(path,line)
if os.path.isdir(filepath):
self.__scanInDir(filepath)
else:
if os.path.splitext(filepath)[1] in self.__config["scan_suffix"]:
pFile = open(filepath,"r")
try:
self.__scanFilePaths[filepath] = pFile.read()
self.__tranWordArrs[filepath] = []
finally:
pFile.close()
# process the scanned files
def __progressFiles(self):
for path,content in self.__scanFilePaths.items():
def onHandle(tmpStr):
if self.has_zh(tmpStr.decode('utf-8')):
key = "\"a"+self.__getWordIdx()+"\"";
if not self.__tranWordDic.has_key(key) :
self.__tranWordDic[ key ] = tmpStr
self.__tranWordArrs[path].append({"key":key,"val":tmpStr})
self.__scanWordInContent(content,onHandle)
self.__logCallFunc({"isFinish":True})
# scan the file content for Chinese strings
def __scanWordInContent(self,content,func):
tmpStr = ""
markFlag = False
for i,ch in enumerate(content):
if ch == "\"":
if content[i-1] == "\\":
if markFlag:
tmpStr += "\""
continue;
markFlag = not markFlag;
if markFlag == False :
tmpStr += "\""
func(tmpStr)
tmpStr = ""
if markFlag :
tmpStr += ch
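# Illustrative effect (hypothetical snippet): for scanned content such as
#     setTitle("你好")
# the handler receives the full quoted literal '"你好"'; __exportFile() then
# emits `i18n["a10000"] = "你好"` into the output file and rewrites the source
# call as setTitle(i18n["a10000"]).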
def has_zh(self,txt):
zhPattern = re.compile(u'[\u4e00-\u9fa5]+')
ret = False
if zhPattern.search(txt):
ret = True
else:
ret = False
return ret
# whether to skip this file when scanning
def __isIgnoreScan(self,path):
ret = False
for ignore_path in self.__config["ignore_path"]:
# print(os.path.join(self.__inDir,ignore_path), os.path.join(self.__inDir,path))
if os.path.join(self.__inDir,ignore_path) == os.path.join(self.__inDir,path):
ret = True
break
return ret
def __getWordIdx(self):
idx = 10000;
while True:
if self.__tranWordDic.has_key("\"a"+str(idx)+"\""):
idx += 1
continue;
else:
return str(idx);
# write the output files
def __exportFile(self):
content = "i18n = {} \n";
for k,v in self.__tranWordDic.items():
content += "i18n[" + k + "] = " + self.__tranWordDic[k] + "\n";
pFile = open(self.__outDir,"w")
pFile.write(content)
pFile.close()
for path,content in self.__scanFilePaths.items():
if len(self.__tranWordArrs[path]) > 0 :
for param in self.__tranWordArrs[path]:
content = content.replace(param.get("val"),"i18n["+param.get("key")+"]")
self.__scanFilePaths[path] = content
pFile = open(path,"w")
pFile.write(content)
pFile.close()
| mit |
leethargo/geonet | geonet/network.py | 1 | 3958 | '''
Data structures for (Steiner) tree networks
'''
import networkx as nx
class Net(object):
'''Network'''
def __init__(self, nodes, arcs):
'''
nodes: node IDs
arcs: tuples of node IDs (tail, head)
'''
self.dg = nx.DiGraph()
self.dg.add_nodes_from(nodes)
self.dg.add_edges_from(arcs)
def get_nodes(self):
return self.dg.nodes()
def get_arcs(self):
return self.dg.edges()
def get_degree(self, n):
return self.dg.degree(n)
def get_neighbors(self, n):
return self.dg.predecessors(n) + self.dg.successors(n)
def __repr__(self):
_nodes = ', '.join([repr(n) for n in self.get_nodes()])
_arcs = ', '.join([repr(a) for a in self.get_arcs()])
return 'Net([%s], [%s])' % (_nodes, _arcs)
# http://stackoverflow.com/questions/390250/
def __eq__(self, other):
if isinstance(other, self.__class__):
# unfortunately, networkx.DiGraph does not implement __eq__
return all([
other.dg.node == self.dg.node,
other.dg.edge == self.dg.edge,
])
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
class SteinerTree(Net):
'''Steiner tree with some node positions fixed'''
def __init__(self, nodes, arcs, pos):
'''
nodes: node IDs
arcs: tuples of node IDs (tail, head)
pos: map from (terminal) node IDs to position tuple
'''
super(SteinerTree, self).__init__(nodes, arcs)
for k,v in pos.items():
self.dg.node[k]['pos'] = v
def is_steiner(self, n):
return not self.is_terminal(n)
def is_terminal(self, n):
return 'pos' in self.dg.node[n]
def get_terminal_nodes(self):
return [n for n in self.get_nodes() if self.is_terminal(n)]
def get_steiner_nodes(self):
return [n for n in self.get_nodes() if self.is_steiner(n)]
def get_position(self, t):
if not self.is_terminal(t):
raise KeyError("Not a terminal: %s" % t)
return self.dg.node[t]['pos']
def get_terminal_positions(self):
return {t: self.get_position(t) for t in self.get_terminal_nodes()}
def is_full_steiner_topology(self):
'''or is the tree degenerate?
three criteria are applied:
1. number of Steiner nodes equals the number of terminals - 2
2. Steiner nodes have degree 3
3. Terminals have degree 1 and are connected to Steiner nodes
'''
terms = self.get_terminal_nodes()
steins = self.get_steiner_nodes()
# special cases for n < 3
if len(terms) < 3 and len(steins) == 0:
return True
# general case
if len(steins) != len(terms) - 2:
return False
if any(self.get_degree(s) != 3 for s in steins):
return False
if any(self.get_degree(t) != 1 for t in terms):
return False
for t in terms:
neighbors = self.get_neighbors(t)
assert len(neighbors) == 1
n = neighbors[0]
if self.is_terminal(n):
return False
return True
def __repr__(self):
_nodes = ', '.join([repr(n) for n in self.get_nodes()])
_arcs = ', '.join([repr(a) for a in self.get_arcs()])
_pos = ', '.join('%s:%s' % (t, self.get_position(t))
for t in self.get_terminal_nodes())
return 'SteinerTree([%s], [%s], {%s})' % (_nodes, _arcs, _pos)
def __eq__(self, other):
return super(SteinerTree, self).__eq__(other) and \
other.get_terminal_positions() == self.get_terminal_positions()
def merge_pos(tree, steiner_pos):
'''build dict as union from terminal and steiner positions'''
pos = dict(tree.get_terminal_positions())
pos.update(steiner_pos)
return pos
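# Minimal usage sketch (illustrative node names; assumes networkx 1.x, which
# get_neighbors above already requires). Only runs when executed as a script.
if __name__ == '__main__':
    _tree = SteinerTree(['t1', 't2', 't3', 's1'],
                        [('s1', 't1'), ('s1', 't2'), ('s1', 't3')],
                        {'t1': (0, 0), 't2': (2, 0), 't3': (0, 2)})
    # one Steiner node of degree 3, three terminals of degree 1
    print(_tree.is_full_steiner_topology())  # True
    print(merge_pos(_tree, {'s1': (0.5, 0.5)}))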
| mit |
chiviak/headphones | lib/unidecode/x021.py | 62 | 3964 | data = (
'', # 0x00
'', # 0x01
'C', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'H', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'N', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'P', # 0x19
'Q', # 0x1a
'', # 0x1b
'', # 0x1c
'R', # 0x1d
'', # 0x1e
'', # 0x1f
'(sm)', # 0x20
'TEL', # 0x21
'(tm)', # 0x22
'', # 0x23
'Z', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'K', # 0x2a
'A', # 0x2b
'', # 0x2c
'', # 0x2d
'e', # 0x2e
'e', # 0x2f
'E', # 0x30
'F', # 0x31
'F', # 0x32
'M', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'FAX', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'D', # 0x45
'd', # 0x46
'e', # 0x47
'i', # 0x48
'j', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'F', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
' 1/3 ', # 0x53
' 2/3 ', # 0x54
' 1/5 ', # 0x55
' 2/5 ', # 0x56
' 3/5 ', # 0x57
' 4/5 ', # 0x58
' 1/6 ', # 0x59
' 5/6 ', # 0x5a
' 1/8 ', # 0x5b
' 3/8 ', # 0x5c
' 5/8 ', # 0x5d
' 7/8 ', # 0x5e
' 1/', # 0x5f
'I', # 0x60
'II', # 0x61
'III', # 0x62
'IV', # 0x63
'V', # 0x64
'VI', # 0x65
'VII', # 0x66
'VIII', # 0x67
'IX', # 0x68
'X', # 0x69
'XI', # 0x6a
'XII', # 0x6b
'L', # 0x6c
'C', # 0x6d
'D', # 0x6e
'M', # 0x6f
'i', # 0x70
'ii', # 0x71
'iii', # 0x72
'iv', # 0x73
'v', # 0x74
'vi', # 0x75
'vii', # 0x76
'viii', # 0x77
'ix', # 0x78
'x', # 0x79
'xi', # 0x7a
'xii', # 0x7b
'l', # 0x7c
'c', # 0x7d
'd', # 0x7e
'm', # 0x7f
'(D', # 0x80
'D)', # 0x81
'((|))', # 0x82
')', # 0x83
'[?]', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'[?]', # 0x87
'[?]', # 0x88
'[?]', # 0x89
'[?]', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'-', # 0x90
'|', # 0x91
'-', # 0x92
'|', # 0x93
'-', # 0x94
'|', # 0x95
'\\', # 0x96
'/', # 0x97
'\\', # 0x98
'/', # 0x99
'-', # 0x9a
'-', # 0x9b
'~', # 0x9c
'~', # 0x9d
'-', # 0x9e
'|', # 0x9f
'-', # 0xa0
'|', # 0xa1
'-', # 0xa2
'-', # 0xa3
'-', # 0xa4
'|', # 0xa5
'-', # 0xa6
'|', # 0xa7
'|', # 0xa8
'-', # 0xa9
'-', # 0xaa
'-', # 0xab
'-', # 0xac
'-', # 0xad
'-', # 0xae
'|', # 0xaf
'|', # 0xb0
'|', # 0xb1
'|', # 0xb2
'|', # 0xb3
'|', # 0xb4
'|', # 0xb5
'^', # 0xb6
'V', # 0xb7
'\\', # 0xb8
'=', # 0xb9
'V', # 0xba
'^', # 0xbb
'-', # 0xbc
'-', # 0xbd
'|', # 0xbe
'|', # 0xbf
'-', # 0xc0
'-', # 0xc1
'|', # 0xc2
'|', # 0xc3
'=', # 0xc4
'|', # 0xc5
'=', # 0xc6
'=', # 0xc7
'|', # 0xc8
'=', # 0xc9
'|', # 0xca
'=', # 0xcb
'=', # 0xcc
'=', # 0xcd
'=', # 0xce
'=', # 0xcf
'=', # 0xd0
'|', # 0xd1
'=', # 0xd2
'|', # 0xd3
'=', # 0xd4
'|', # 0xd5
'\\', # 0xd6
'/', # 0xd7
'\\', # 0xd8
'/', # 0xd9
'=', # 0xda
'=', # 0xdb
'~', # 0xdc
'~', # 0xdd
'|', # 0xde
'|', # 0xdf
'-', # 0xe0
'|', # 0xe1
'-', # 0xe2
'|', # 0xe3
'-', # 0xe4
'-', # 0xe5
'-', # 0xe6
'|', # 0xe7
'-', # 0xe8
'|', # 0xe9
'|', # 0xea
'|', # 0xeb
'|', # 0xec
'|', # 0xed
'|', # 0xee
'|', # 0xef
'-', # 0xf0
'\\', # 0xf1
'\\', # 0xf2
'|', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 |
dav94/plastex | plasTeX/Packages/float.py | 8 | 1426 | #!/usr/bin/env python
import new
from plasTeX import Command, Environment
from plasTeX.Base.LaTeX.Floats import Float, Caption
class newfloat(Command):
args = 'name:str pos:str capfile:str [ reset:str ]'
def invoke(self, tex):
Command.invoke(self, tex)
name = str(self.attributes['name'])
# Create the float class and the caption class
floatcls = new.classobj(name, (Float,), {})
captioncls = new.classobj('caption', (Caption,),
{'macroName':'caption', 'counter':name})
floatcls.caption = captioncls
c = self.ownerDocument.context
c.addGlobal(name, floatcls)
# Create a counter
resetby = self.attributes['reset'] or 'chapter'
c.newcounter(name, resetby, 0, format='${the%s}.${%s}' % (resetby,name))
# Create the float name macro
c.newcommand(name+'name', 0, name)
class floatstyle(Command):
args = 'style:str'
class restylefloat(Command):
args = 'float:str'
class floatname(Command):
args = 'float:str name:str'
def invoke(self, tex):
Command.invoke(self, tex)
float = str(self.attributes['float'])
name = self.attributes['name']
c = self.ownerDocument.context
c.newcommand(float+'name', 0, name)
class floatplacement(Command):
args = 'float:str pos:str'
class listof(Command):
args = 'float:str title'
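# Illustrative LaTeX input handled by the commands above (the float name is
# hypothetical):
#
#   \newfloat{program}{tbp}{lop}[chapter]
#   \floatname{program}{Program}
#   \floatplacement{program}{tp}
#   \listof{program}{List of Programs}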
| mit |
fengzhyuan/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
tuomas777/parkkihubi | parkings/tests/api/utils.py | 1 | 3212 | import json
import uuid
from rest_framework.authtoken.models import Token
ALL_METHODS = ('get', 'post', 'put', 'patch', 'delete')
def token_authenticate(api_client, user):
token, _ = Token.objects.get_or_create(user=user)
api_client.credentials(HTTP_AUTHORIZATION='ApiKey ' + token.key)
return api_client
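# Illustrative pytest-style usage of these helpers (fixture names and the URL
# are hypothetical, not part of this module):
#
#   def test_parking_list(api_client, operator_user):
#       token_authenticate(api_client, operator_user)
#       data = get(api_client, '/v1/parking/')
#       check_list_endpoint_base_fields(data)
#       ids = get_ids_from_results(data['results'])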
def get(api_client, url, status_code=200):
response = api_client.get(url)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def post(api_client, url, data=None, status_code=201):
response = api_client.post(url, data)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def put(api_client, url, data=None, status_code=200):
response = api_client.put(url, data)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def patch(api_client, url, data=None, status_code=200):
response = api_client.patch(url, data)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
return json.loads(response.content.decode('utf-8'))
def delete(api_client, url, status_code=204):
response = api_client.delete(url)
assert response.status_code == status_code, '%s %s' % (response.status_code, response.data)
def check_method_status_codes(api_client, urls, methods, status_code, **kwargs):
# accept also a single url as a string
if isinstance(urls, str):
urls = (urls,)
for url in urls:
for method in methods:
response = getattr(api_client, method)(url)
assert response.status_code == status_code, (
'%s %s expected %s, got %s %s' % (method, url, status_code, response.status_code, response.data)
)
error_code = kwargs.get('error_code')
if error_code:
assert response.data['code'] == error_code, (
'%s %s expected error_code %s, got %s' % (method, url, error_code, response.data['code'])
)
def check_list_endpoint_base_fields(data):
assert set(data.keys()) == {'next', 'previous', 'count', 'results'}
def check_required_fields(api_client, url, expected_required_fields, detail_endpoint=False):
method = put if detail_endpoint else post
# send empty data to get all required fields in an error message, they will be in form
# { "<field name>": ["This field is required"], "<field name 2>": ["This field is required"], ...}
response_data = method(api_client, url, {}, 400)
required_fields = set()
for field in response_data:
if isinstance(response_data[field], list) and 'This field is required.' in response_data[field]:
required_fields.add(field)
assert required_fields == expected_required_fields, '%s != %s' % (required_fields, expected_required_fields)
def get_ids_from_results(results, as_set=True):
id_list = [uuid.UUID(result['id']) for result in results]
return set(id_list) if as_set else id_list
| mit |
michaelhush/M-LOOP | docs/conf.py | 1 | 10253 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# M-LOOP documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 24 11:34:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'M-LOOP'
copyright = '2016, Michael R Hush'
author = 'Michael R Hush'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.2'
# The full version, including alpha/beta/rc tags.
release = '3.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
# exclude_patterns = ['_templates']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Custom sidebar templates, maps document names to template names.
html_sidebars = { '**': ['about.html','navigation.html','relations.html', 'searchbox.html'], }
#'globaltoc.html',
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'logo':'M-LOOP_logo.png',
'logo_name':True,
'description':'Machine-Learning Online Optimization Package',
'github_user':'michaelhush',
'github_repo':'M-LOOP',
'github_banner':True,
'font_family':"Arial, Helvetica, sans-serif",
'head_font_family':"Arial, Helvetica, sans-serif",
'analytics_id':'UA-83520804-1'}
#'github_button':True,
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'M-LOOP v3.2.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/M-LOOP_logo.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/M-LOOP_logo.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'M-LOOPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'M-LOOP.tex', 'M-LOOP Documentation',
'Michael R Hush', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'M-LOOP_logo.pdf'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'm-loop', 'M-LOOP Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'M-LOOP', 'M-LOOP Documentation',
author, 'M-LOOP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
rouxcode/django-admin-sort | admin_sort/tests/testapp/models.py | 1 | 5140 | # -*- coding: utf-8 -*-
from django.db import models
from admin_sort.models import SortableModelMixin
class Author(SortableModelMixin, models.Model):
"""
SortableModelMixin: on save, intercept and first update needed other
instances, then save
"""
name = models.CharField('Name', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
position_field = 'my_order'
insert_position = 'last'
class Meta:
ordering = ('my_order', )
def __unicode__(self):
return self.name
class SortableBook(models.Model):
"""
the classic sortable change list: drag-and-drop sorting, using SortableAdminMixin
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
class Meta(object):
ordering = ('my_order',)
def __unicode__(self):
return self.title
class AnotherSortableBook(models.Model):
"""
the other sortable change list: dropdown sorting,
using DropdownSortableAdminMixin
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(default=0, blank=False, null=False)
author = models.ForeignKey(Author, null=True, on_delete=models.SET_NULL)
class Meta(object):
ordering = ('my_order',)
def __unicode__(self):
return self.title
class Chapter(models.Model):
"""
various SortableInlineMixon modes
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
another_book = models.ForeignKey(
AnotherSortableBook, null=True, on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
another_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_order', )
def __unicode__(self):
return 'Chapter: {0}'.format(self.title)
class Notes(models.Model):
"""
various SortableInlineMixon modes
"""
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
another_book = models.ForeignKey(
AnotherSortableBook, null=True, on_delete=models.SET_NULL)
note = models.CharField('Note', null=True, blank=True, max_length=255)
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
one_more = models.CharField(
'Note3 (simulating tabular inlines)',
null=True, blank=True, max_length=255)
my_order = models.PositiveIntegerField(blank=False, null=True)
another_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_order', )
def __unicode__(self):
return 'Note: {0}'.format(self.note)
class ChapterExtraZero(models.Model):
"""
various SortableInlineMixon modes (testing "extra" on admin.Meta)
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', '-title')
def __unicode__(self):
return 'ChapterExtraZero: {0}'.format(self.title)
class NotesExtraZero(models.Model):
"""
various SortableInlineMixon modes (testing "extra" on admin.Meta)
"""
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', 'another_field')
def __unicode__(self):
return 'NotesExtraZero: {0}'.format(self.another_field)
class Another(models.Model):
"""
normal inline - affected in any way!?
"""
title = models.CharField('Title', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
class Meta(object):
ordering = ('my_order', '-title')
def __unicode__(self):
return 'Another: {0}'.format(self.title)
class AnotherOne(models.Model):
"""
normal inline - affected in any way!?
"""
another_field = models.CharField(
'Note2', null=True, blank=True, max_length=255)
book = models.ForeignKey(SortableBook, null=True,
on_delete=models.SET_NULL)
my_order = models.PositiveIntegerField(blank=False, null=True)
def __unicode__(self):
return 'AnotherOne: {0}'.format(self.another_field)
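# --- Illustrative ORM usage (a minimal sketch, assuming the mixin behaviour described ---
# --- in the docstrings above; names and values here are made up) ---
def _example_create_sorted_objects():
    # my_order is expected to be maintained by SortableModelMixin when Author is saved.
    author = Author.objects.create(name='Jane Doe')
    SortableBook.objects.create(title='First Book', author=author)
    SortableBook.objects.create(title='Second Book', author=author)
    # Meta.ordering keeps querysets in manual sort order.
    return list(SortableBook.objects.values_list('title', 'my_order'))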
| mit |
ngoix/OCRF | sklearn/linear_model/ransac.py | 14 | 17163 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
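# Worked example (illustrative): with 70 inliers out of 100 samples, min_samples=2
# and probability=0.99, the formula gives
#     N >= log(1 - 0.99) / log(1 - 0.7 ** 2) = log(0.01) / log(0.51) ~= 6.84,
# so _dynamic_max_trials(70, 100, 2, 0.99) returns ceil(6.84) = 7.0 trials.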
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
loss : string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight: array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' will be removed in version 0.20. Use "
"'loss' instead.", DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
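# --- Illustrative usage sketch (added for clarity; the data below is made up) ---
def _example_ransac_fit():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 1))
    y = 3.0 * X.ravel() + 0.5
    y[::10] += 50.0  # inject gross outliers
    ransac = RANSACRegressor(random_state=0)
    ransac.fit(X, y)
    # The final model is refit on the inliers only.
    return ransac.estimator_.coef_, ransac.inlier_mask_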
| bsd-3-clause |
openhatch/new-mini-tasks | vendor/packages/Django/django/core/management/sql.py | 104 | 7942 | from __future__ import unicode_literals
import codecs
import os
import re
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models
from django.db.models import get_models
from django.utils._os import upath
def sql_create(app, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
# hasn't set ENGINE for the database.
raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
"because you haven't properly specified the ENGINE setting for the database.\n" +
"see: https://docs.djangoproject.com/en/dev/ref/settings/#databases")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = models.get_models(app, include_auto_created=True)
final_output = []
tables = connection.introspection.table_names()
known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models])
pending_references = {}
for model in app_models:
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app, style, connection):
"Returns a list of the DROP TABLE SQL statements for the given app."
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except:
cursor = None
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.table_names(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = models.get_models(app, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append((model, f))
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_flush(style, connection, only_django=False, reset_sequences=True):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
seqs = connection.introspection.sequence_list() if reset_sequences else ()
statements = connection.ops.sql_flush(style, tables, seqs)
return statements
def sql_custom(app, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
output = []
app_models = get_models(app)
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
output = []
for model in models.get_models(app):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_all(app, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection)
def _split_statements(content):
comment_re = re.compile(r"^((?:'[^']*'|[^'])*?)--.*$")
statements = []
statement = []
for line in content.split("\n"):
cleaned_line = comment_re.sub(r"\1", line).strip()
if not cleaned_line:
continue
statement.append(cleaned_line)
if cleaned_line.endswith(";"):
statements.append(" ".join(statement))
statement = []
return statements
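# Worked example (illustrative): for the content
#     "INSERT INTO foo VALUES (1); -- seed row\nINSERT INTO foo VALUES (2);"
# the comment regex strips the trailing "--" comment and _split_statements returns
#     ["INSERT INTO foo VALUES (1);", "INSERT INTO foo VALUES (2);"]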
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dir = os.path.normpath(os.path.join(os.path.dirname(upath(models.get_app(model._meta.app_label).__file__)), 'sql'))
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)),
os.path.join(app_dir, "%s.sql" % opts.object_name.lower())]
for sql_file in sql_files:
if os.path.exists(sql_file):
with codecs.open(sql_file, 'U', encoding=settings.FILE_CHARSET) as fp:
# Some backends can't execute more than one SQL statement at a time,
# so split into separate statements.
output.extend(_split_statements(fp.read()))
return output
def emit_post_sync_signal(created_models, verbosity, interactive, db):
# Emit the post_sync signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running post-sync handlers for application %s" % app_name)
models.signals.post_syncdb.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
| apache-2.0 |
datamade/pyhacrf | pyhacrf/pyhacrf.py | 1 | 13946 | # Authors: Dirko Coetsee
# License: 3-clause BSD
""" Implements a Hidden Alignment Conditional Random Field (HACRF). """
from __future__ import absolute_import
import numpy as np
import lbfgs
from .algorithms import forward, backward
from .algorithms import forward_predict, forward_max_predict
from .algorithms import gradient, gradient_sparse, populate_sparse_features, sparse_multiply
from . import adjacent
from .state_machine import DefaultStateMachine
class Hacrf(object):
""" Hidden Alignment Conditional Random Field with L2 regularizer.
Parameters
----------
l2_regularization : float, optional (default=0.0)
The regularization parameter.
optimizer : function, optional (default=None)
The optimizing function that should be used minimize the negative log posterior.
The function should have the signature:
min_objective, argmin_objective, ... = fmin(obj, x0, **optimizer_kwargs),
where obj is a function that returns
the objective function and its gradient given a parameter vector; and x0 is the initial parameter vector.
optimizer_kwargs : dictionary, optional (default=None)
The keyword arguments to pass to the optimizing function. Only used when `optimizer` is also specified.
state_machine : Instance of `GeneralStateMachine` or `DefaultStateMachine`, optional (default=`DefaultStateMachine`)
The state machine to use to generate the lattice.
viterbi : Boolean, optional (default=False).
Whether to use Viterbi (max-sum) decoding for predictions (not training)
instead of the default sum-product algorithm.
References
----------
See *A Conditional Random Field for Discriminatively-trained Finite-state String Edit Distance*
by McCallum, Bellare, and Pereira, and the report *Conditional Random Fields for Noisy text normalisation*
by Dirko Coetsee.
"""
def __init__(self,
l2_regularization=0.0,
optimizer=None,
optimizer_kwargs=None,
state_machine=None):
self.parameters = None
self.classes = None
self.l2_regularization = l2_regularization
self._optimizer = optimizer
self._optimizer_kwargs = optimizer_kwargs
self._optimizer_result = None
self._state_machine = state_machine
self._states_to_classes = None
self._evaluation_count = None
if (state_machine is None or
isinstance(state_machine, DefaultStateMachine)):
self._Model = _AdjacentModel
else:
self._Model = _GeneralModel
def fit(self, X, y, verbosity=0):
"""Fit the model according to the given training data.
Parameters
----------
X : List of ndarrays, one for each training example.
Each training example's shape is (string1_len, string2_len, n_features), where
string1_len and string2_len are the length of the two training strings and n_features the
number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
self.classes = list(set(y))
n_points = len(y)
if len(X) != n_points:
raise Exception('Number of training points should be the same as training labels.')
if not self._state_machine:
self._state_machine = DefaultStateMachine(self.classes)
# Initialize the parameters given the state machine, features, and target classes.
self.parameters = self._initialize_parameters(self._state_machine, X[0].shape[2])
# Create a new model object for each training example
models = [self._Model(self._state_machine, x, ty) for x, ty in zip(X, y)]
self._evaluation_count = 0
def _objective(parameters):
gradient = np.zeros(self.parameters.shape)
ll = 0.0 # Log likelihood
# TODO: Embarrassingly parallel
for model in models:
dll, dgradient = model.forward_backward(parameters.reshape(self.parameters.shape))
ll += dll
gradient += dgradient
parameters_without_bias = np.array(parameters, dtype='float64') # exclude the bias parameters from being regularized
parameters_without_bias[0] = 0
ll -= self.l2_regularization * np.dot(parameters_without_bias.T, parameters_without_bias)
gradient = gradient.flatten() - 2.0 * self.l2_regularization * parameters_without_bias
if verbosity > 0:
if self._evaluation_count == 0:
print('{:10} {:10} {:10}'.format('Iteration', 'Log-likelihood', '|gradient|'))
if self._evaluation_count % verbosity == 0:
print('{:10} {:10.4} {:10.4}'.format(self._evaluation_count, ll, (abs(gradient).sum())))
self._evaluation_count += 1
# TODO: Allow some of the parameters to be frozen. ie. not trained. Can later also completely remove
# TODO: the computation associated with these parameters.
return -ll, -gradient
def _objective_copy_gradient(paramers, g):
nll, ngradient = _objective(paramers)
g[:] = ngradient
return nll
if self._optimizer:
self.optimizer_result = self._optimizer(_objective, self.parameters.flatten(), **self._optimizer_kwargs)
self.parameters = self.optimizer_result[0].reshape(self.parameters.shape)
else:
optimizer = lbfgs.LBFGS()
final_betas = optimizer.minimize(_objective_copy_gradient,
x0=self.parameters.flatten(),
progress=None)
self.optimizer_result = final_betas
self.parameters = final_betas.reshape(self.parameters.shape)
self.parameters = np.asfortranarray(self.parameters)
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : List of ndarrays, one for each training example.
Each training example's shape is (string1_len, string2_len, n_features), where
string1_len and string2_len are the length of the two training strings and n_features the
number of features.
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
predictions = [self._Model(self._state_machine, x).predict(self.parameters.T)
for x in X]
predictions = np.array(predictions)
return predictions
def fast_pair(self, x):
predictions = self._Model(self._state_machine, x).predict(self.parameters.T)
return predictions
def predict(self, X):
"""Predict the class for X.
The predicted class for each sample in X is returned.
Parameters
----------
X : List of ndarrays, one for each training example.
Each training example's shape is (string1_len,
string2_len, n_features), where string1_len and
string2_len are the length of the two training strings and
n_features the number of features.
Returns
-------
y : iterable of shape = [n_samples]
The predicted classes.
"""
return [self.classes[prediction.argmax()] for prediction in self.predict_proba(X)]
@staticmethod
def _initialize_parameters(state_machine, n_features):
""" Helper to create initial parameter vector with the correct shape. """
return np.zeros((state_machine.n_states
+ state_machine.n_transitions,
n_features))
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return {'l2_regularization': self.l2_regularization,
'optimizer': self._optimizer,
'optimizer_kwargs': self._optimizer_kwargs}
def set_params(self, l2_regularization=0.0, optimizer=None, optimizer_kwargs=None):
"""Set the parameters of this estimator.
Returns
-------
self
"""
self.l2_regularization = l2_regularization
self._optimizer = optimizer
self._optimizer_kwargs = optimizer_kwargs
return self
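# --- Illustrative usage sketch (toy data; a minimal example, not from the package docs) ---
def _example_hacrf_fit():
    # Each element of X is a feature lattice of shape (len(string1), len(string2), n_features).
    X = [np.ones((3, 4, 5)), np.ones((2, 6, 5))]
    y = ['match', 'non-match']
    model = Hacrf(l2_regularization=1.0)
    model.fit(X, y)
    return model.predict_proba(X)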
class _Model(object):
def __init__(self, state_machine, x, y=None):
self.state_machine = state_machine
self.states_to_classes = state_machine.states_to_classes
self.x = x
self.y = y
self.forward_backward = self.dense_forward_backward
def predict(self, parameters):
""" Run forward algorithm to find the predicted distribution over classes. """
x_dot_parameters = np.matmul(self.x, parameters)
probs = self._forward_predict(x_dot_parameters)
return probs
def dense_forward_backward(self, parameters):
""" Run the forward backward algorithm with the given parameters. """
I, J, K = self.x.shape
x_dot_parameters = np.dot(self.x,
parameters.T)
alpha = self._forward(x_dot_parameters)
beta = self._backward(x_dot_parameters)
classes_to_ints = {k: i
for i, k
in enumerate(set(self.states_to_classes.values()))}
states_to_classes = np.array([classes_to_ints[self.states_to_classes[state]]
for state
in range(max(self.states_to_classes.keys()) + 1)],
dtype='int64')
ll, deriv = gradient(alpha, beta, parameters, states_to_classes,
self.x, classes_to_ints[self.y], I, J, K)
return ll, deriv
def sparse_forward_backward(self, parameters):
""" Run the forward backward algorithm with the given parameters. """
I, J, K = self.x.shape
C = self.sparse_x[0].shape[2]
S, _ = parameters.shape
x_dot_parameters = np.zeros((I, J, S))
sparse_multiply(x_dot_parameters,
self.sparse_x[0],
self.sparse_x[1],
parameters.T,
I, J, K, C, S)
alpha = self._forward(x_dot_parameters)
beta = self._backward(x_dot_parameters)
classes_to_ints = {k: i
for i, k
in enumerate(set(self.states_to_classes.values()))}
states_to_classes = np.array([classes_to_ints[self.states_to_classes[state]]
for state
in range(max(self.states_to_classes.keys()) + 1)],
dtype='int64')
ll, deriv = gradient_sparse(alpha, beta,
parameters,
states_to_classes,
self.sparse_x[0],
self.sparse_x[1],
classes_to_ints[self.y],
I, J,
self.sparse_x[0].shape[2])
return ll, deriv
def _construct_sparse_features(self, x):
""" Helper to construct a sparse representation of the features. """
I, J, K = x.shape
new_array_height = (x != 0).sum(axis=2).max()
index_array = -np.ones((I, J, new_array_height), dtype='int64')
value_array = -np.ones((I, J, new_array_height), dtype='float64')
populate_sparse_features(x, index_array, value_array, I, J, K)
return index_array, value_array
class _GeneralModel(_Model):
def __init__(self, state_machine, x, y=None):
super(_GeneralModel, self).__init__(state_machine, x, y)
self._lattice = self.state_machine.build_lattice(self.x)
def _forward(self, x_dot_parameters):
""" Helper to calculate the forward weights. """
return forward(self._lattice, x_dot_parameters,
self.state_machine.n_states)
def _backward(self, x_dot_parameters):
""" Helper to calculate the backward weights. """
I, J, _ = self.x.shape
return backward(self._lattice, x_dot_parameters, I, J,
self.state_machine.n_states)
def _forward_predict(self, x_dot_parameters):
return forward_predict(self._lattice, x_dot_parameters,
self.state_machine.n_states)
class _AdjacentModel(_Model):
def _forward(self, x_dot_parameters) :
return adjacent.forward(x_dot_parameters,
self.state_machine.n_states)
def _backward(self, x_dot_parameters) :
return adjacent.backward(x_dot_parameters,
self.state_machine.n_states)
def _forward_predict(self, x_dot_parameters):
return adjacent.forward_predict(x_dot_parameters,
self.state_machine.n_states)
| bsd-3-clause |
BWeatherMaine/WXGIF | libs/images2gif.py | 2 | 28499 | # -*- coding: utf-8 -*-
# Copyright (c) 2010, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if its the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Some implementation details are based on the GIF file structure as provided
by Wikipedia.
"""
import os
try:
import PIL
from PIL import Image, ImageChops
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
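# Worked example (illustrative): a float array in [0, 1] is clipped and rescaled to uint8,
# e.g. np.array([[0.0, 0.5], [1.0, 2.0]]) becomes [[0, 127], [255, 255]] (dtype uint8).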
def intToBin(i):
""" Integer to two bytes """
# divide in two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
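# Worked example (illustrative): intToBin(300) == chr(44) + chr(1), i.e. the little-endian
# byte pair 0x2C 0x01, because 300 = 44 + 1 * 256.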
def getheaderAnim(im):
""" Animation header. To replace the getheader()[0] """
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(im):
""" Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colours come into play that require a redefined palette.
Still a maximum of 256 colours per frame, obviously.
Written by Ant1 on 2010-08-22
"""
bb = '\x2C' # Image separator,
bb += intToBin( 0 ) # Left position
bb += intToBin( 0 ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
bb += '\x87' # packed field : local color table flag1, interlace0, sorted table0, reserved00, lct size111=7=2^(7+1)=256.
# LZW minimum size code now comes later, beginning of [image data] blocks
return bb
#def getAppExt(loops=float('inf')):
#compile error commented by zcwang
def getAppExt(loops=float(0)):
""" Application extention. Part that specifies amount of loops.
If loops is inf, it goes on infinitely.
"""
if loops == 0:
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
# if loops == float('inf'):
if loops == float(0):
loops = 2**16-1
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(duration=0.1):
""" Graphics Control Extension. A sort of header at the start of
each image. Specifies transparency and duration. """
bb = '\x21\xF9\x04'
bb += '\x08' # no transparency
bb += intToBin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
def _writeGifToFile(fp, images, durations, loops):
""" Given a set of images writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palettes.append(im.palette.getdata()[1])
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = getheaderAnim(im)
appext = getAppExt(loops)
# Write
fp.write(header)
fp.write(globalPalette)
fp.write(appext)
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = getGraphicsControlExt(durations[frames])
# Make image descriptor suitable for using 256 local color palette
lid = getImageDescriptor(im)
# Write local header
if palette != globalPalette:
# Use local color palette
fp.write(graphext)
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext)
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";") # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False, nq=0):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The amount of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nq = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nq.paletteImage())
else:
im = nq.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images2):
durations = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images2]
# Open file
fp = open(filename, 'wb')
# Write
try:
n = _writeGifToFile(fp, images2, duration, loops)
finally:
fp.close()
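# --- Illustrative usage sketch (the filename and frames below are made up) ---
def _example_write_gif():
    # Ten solid-colour 64x64 RGB frames, 0.2 seconds each, looping forever.
    frames = [np.zeros((64, 64, 3), dtype=np.uint8) + i for i in range(0, 250, 25)]
    writeGif('example.gif', frames, duration=0.2, repeat=True)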
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.Image.fromarray(im) )
# Done
return images
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
    samplefac should be an integer of 1 or higher; 1 gives
    the highest quality but the slowest performance.
    With a value of 10, one tenth of all pixels are used during
    training. This value seems a nice tradeoff between speed
    and quality.
    colors is the number of colors to reduce the image to. This
    should preferably be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
    colormap = None # The colour map (palette derived from the network)
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
        self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The colour map (palette derived from the network)
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
assert image.mode == "RGBA"
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
            bb = self.colormap[i, 0]
            gg = self.colormap[i, 1]
            rr = self.colormap[i, 2]
            outstream.write(rr if rgb else bb)
            outstream.write(gg)
            outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
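        # Memoized neighbourhood profile: a quadratic falloff of length
        # rad*2-1 centred on the winning neuron; the centre entry is zeroed
        # so the winner itself (already adjusted by altersingle) is not moved twice.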
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(range(mid-1,-1,-1)+range(-1,mid))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0;
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Beginning 1D learning: samplepixels =",samplepixels," rad =", rad
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print tmp + printed_string,
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print "Finished 1D learning: final alpha =",(1.0*alpha)/self.INITALPHA,"!"
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if cKDTree:
return self.quantize_with_scipy(image)
else:
print 'Scipy not available, falling back to slower version.'
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print "Distance:", (result[0].sum()/(w*h))
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, (r, g, b)):
i = self.inxsearch(r, g, b)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
        a = np.argmin((dists*dists).sum(1))
return a
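# Illustrative sketch (hypothetical file name; not part of this module's API):
# NeuQuant is normally driven through writeGif(nq=...), but it can also be used
# directly on a sufficiently large RGBA image, e.g.
#   im = Image.open('photo.png').convert('RGBA')
#   nq = NeuQuant(im, samplefac=10, colors=256)
#   paletted = nq.quantize(im)   # returns a palette ('P' mode) PIL image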
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
| apache-2.0 |
Pegase745/gitlab-freak | gitlab_freak/helpers.py | 2 | 2832 | from __future__ import absolute_import, unicode_literals
from distutils.version import LooseVersion
from sqlalchemy.sql.expression import ClauseElement
from flask import Flask
import json
import requests
from gitlab_freak.models import db, ProjectDependency
import gitlab
app = Flask(__name__)
app.config.from_envvar('GITLAB_FREAK_SETTINGS')
git = gitlab.Gitlab(app.config['GITLAB_ENDPOINT'], app.config['GITLAB_TOKEN'])
def get_or_create(session, model, defaults=None, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance, False
else:
params = dict(
(k, v) for k, v in kwargs.iteritems() if not isinstance(v, ClauseElement))
params.update(defaults or {})
instance = model(**params)
session.add(instance)
return instance, True
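# Illustrative usage sketch (values are hypothetical): fetch an existing row or
# create it in one call; the second return value says whether it was created.
#   dep, created = get_or_create(db.session, ProjectDependency,
#                                project_id=42, name='lodash',
#                                defaults={'actual_version': '1.0.0'})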
def nodeLatestVersion(dependency, project_id):
r = requests.get('%s%s/latest' % (app.config['NPM_REGISTRY'], dependency))
latestVersion = r.json().get('version')
try:
dep = ProjectDependency.by_project(project_id, dependency)
dep.latest_version = latestVersion
if LooseVersion(dep.actual_version) < LooseVersion(latestVersion):
dep.status = 'ko'
else:
dep.status = 'ok'
db.session.commit()
except Exception, e:
app.logger.error(e)
db.session.rollback()
def nodeDepsFetcher(project_id):
# Get dependencies from package.json
project = git.getproject(project_id)
depFileEncoded = git.getfile(project_id, 'package.json',
project['default_branch'])
# Decode from base64
deps = json.loads(depFileEncoded.get('content').decode('base64'))
mainDeps = deps.get('dependencies')
devDeps = deps.get('devDependencies')
# Insert in project_dependency
# TODO create single function for that
for mDep, mVersion in list(mainDeps.items()):
mdep, created = get_or_create(db.session, ProjectDependency,
project_id=project_id, name=mDep,
actual_version=mVersion)
if not created:
app.logger.info('[%s] Dep %s already exist' % (project_id, mDep))
db.session.commit()
nodeLatestVersion(mDep, project_id)
for devDep, devVersion in list(devDeps.items()):
ddep, created = get_or_create(db.session, ProjectDependency,
project_id=project_id, name=devDep,
actual_version=devVersion, dev=True)
if not created:
app.logger.info('[%s] Dev dep %s already exist' %
(project_id, devDep))
db.session.commit()
nodeLatestVersion(devDep, project_id)
return True
| mit |
fabiand/anaconda | pyanaconda/constants_text.py | 2 | 1811 | #
# constants_text.py: text mode constants
#
# Copyright (C) 2000, 2001, 2002 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# pylint: disable=W0401
from pyanaconda.constants import *
from pyanaconda.i18n import _, N_
class Translator:
"""A simple class to facilitate on-the-fly translation for newt buttons"""
def __init__(self, button, check):
self.button = button
self.check = check
def __getitem__(self, which):
if which == 0:
return _(self.button)
elif which == 1:
return self.check
raise IndexError
def __len__(self):
return 2
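# Note on usage: a Translator behaves like a two-element sequence whose first
# element is re-translated on every access, so the button tuples below pick up
# locale changes made after import time:
#   TEXT_OK_BUTTON[0]  -> _("OK")   (translated label)
#   TEXT_OK_BUTTON[1]  -> "ok"      (stable check value)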
TEXT_OK_STR = N_("OK")
TEXT_OK_CHECK = "ok"
TEXT_OK_BUTTON = Translator(TEXT_OK_STR, TEXT_OK_CHECK)
TEXT_CANCEL_STR = N_("Cancel")
TEXT_CANCEL_CHECK = "cancel"
TEXT_CANCEL_BUTTON = Translator(TEXT_CANCEL_STR, TEXT_CANCEL_CHECK)
TEXT_YES_STR = N_("Yes")
TEXT_YES_CHECK = "yes"
TEXT_YES_BUTTON = Translator(TEXT_YES_STR, TEXT_YES_CHECK)
TEXT_NO_STR = N_("No")
TEXT_NO_CHECK = "no"
TEXT_NO_BUTTON = Translator(TEXT_NO_STR, TEXT_NO_CHECK)
# Make the return calls from the UIScreen input() function more clear
INPUT_PROCESSED = None
INPUT_DISCARDED = False
| gpl-2.0 |
sonata-nfv/son-cli | setup.py | 5 | 3428 | # Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from setuptools import setup, find_packages
import codecs
import os.path as path
# buildout build system
# http://www.buildout.org/en/latest/docs/tutorial.html
# setup() documentation:
# http://python-packaging-user-guide.readthedocs.org/en/
# latest/distributing/#setup-py
cwd = path.dirname(__file__)
longdesc = codecs.open(path.join(cwd, 'README.md'), 'r', 'utf-8').read()
name = 'sonata-cli'
setup(
name=name,
license='Apache License, Version 2.0',
version='3.0',
url='https://github.com/sonata-nfv/son-cli',
author_email='[email protected]',
long_description=longdesc,
package_dir={'': 'src'},
packages=find_packages('src'), # dependency resolution
namespace_packages=['son', ],
include_package_data=True,
package_data= {
'son': ['schema/tests/son-schema/*', 'workspace/samples/*',
'monitor/docker_compose_files/*', 'monitor/grafana/*',
'monitor/prometheus/*', 'monitor/*.exp',
'validate/eventcfg.yml']
},
# in jenkins, the last package in the list is installed first
install_requires=['setuptools', 'pyaml', 'jsonschema', 'validators',
'requests>2.4.2', 'coloredlogs<=5.1.1', 'paramiko',
'termcolor', 'tabulate', 'networkx<=1.12', 'Flask',
'PyJWT>=1.4.2', 'docker==2.0.2', 'scipy', 'numpy',
'watchdog', 'Flask-Cors', 'flask_cache', 'redis',
'pycrypto', 'matplotlib', 'prometheus_client',
'requests-toolbelt==0.8.0'],
zip_safe=False,
entry_points={
'console_scripts': [
'son-workspace=son.workspace.workspace:main',
'son-package=son.package.package:main',
'son-monitor=son.monitor.monitor:main',
'son-profile=son.profile.profile:main',
'son-validate=son.validate.validate:main',
'son-validate-api=son.validate.api.api:main',
'son-access=son.access.access:main'
],
},
test_suite='son',
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
| apache-2.0 |
fnaum/rez | src/rez/vendor/lockfile/sqlitelockfile.py | 487 | 5540 | from __future__ import absolute_import, division
import time
import os
try:
unicode
except NameError:
unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
testdb = None
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
if SQLiteLockFile.testdb is None:
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
SQLiteLockFile.testdb = testdb
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
                    # Someone else has the lock and we are impatient.
raise AlreadyLocked("%s is already locked" % self.path)
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me (by %s)" %
(self.unique_name, self._who_is_locking()))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
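# Illustrative usage sketch (path and timeout are hypothetical):
#   lock = SQLiteLockFile('somefile', timeout=10)
#   lock.acquire()          # raises LockTimeout if still held after ~10s
#   try:
#       pass                # ... critical section ...
#   finally:
#       lock.release()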
| lgpl-3.0 |
Vagab0nd/SiCKRAGE | lib3/dogpile/cache/proxy.py | 2 | 2601 | """
Proxy Backends
------------------
Provides a utility and a decorator class that allow for modifying the behavior
of different backends without altering the class itself or having to extend the
base backend.
.. versionadded:: 0.5.0 Added support for the :class:`.ProxyBackend` class.
"""
from .api import CacheBackend
class ProxyBackend(CacheBackend):
"""A decorator class for altering the functionality of backends.
Basic usage::
from dogpile.cache import make_region
from dogpile.cache.proxy import ProxyBackend
class MyFirstProxy(ProxyBackend):
def get(self, key):
# ... custom code goes here ...
return self.proxied.get(key)
def set(self, key, value):
# ... custom code goes here ...
self.proxied.set(key)
class MySecondProxy(ProxyBackend):
def get(self, key):
# ... custom code goes here ...
return self.proxied.get(key)
region = make_region().configure(
'dogpile.cache.dbm',
expiration_time = 3600,
arguments = {
"filename":"/path/to/cachefile.dbm"
},
wrap = [ MyFirstProxy, MySecondProxy ]
)
Classes that extend :class:`.ProxyBackend` can be stacked
together. The ``.proxied`` property will always
point to either the concrete backend instance or
the next proxy in the chain that a method can be
delegated towards.
.. versionadded:: 0.5.0
"""
def __init__(self, *args, **kwargs):
self.proxied = None
def wrap(self, backend):
""" Take a backend as an argument and setup the self.proxied property.
Return an object that be used as a backend by a :class:`.CacheRegion`
object.
"""
assert isinstance(backend, CacheBackend) or isinstance(
backend, ProxyBackend
)
self.proxied = backend
return self
#
# Delegate any functions that are not already overridden to
# the proxies backend
#
def get(self, key):
return self.proxied.get(key)
def set(self, key, value):
self.proxied.set(key, value)
def delete(self, key):
self.proxied.delete(key)
def get_multi(self, keys):
return self.proxied.get_multi(keys)
def set_multi(self, mapping):
self.proxied.set_multi(mapping)
def delete_multi(self, keys):
self.proxied.delete_multi(keys)
def get_mutex(self, key):
return self.proxied.get_mutex(key)
| gpl-3.0 |
benekastah/rock-paper-scissors | rps.py | 1 | 11894 | # pylint: disable=missing-docstring
from collections import OrderedDict, defaultdict
import select
import socket
import sys
class Style(object):
RESET = 0
BOLD = 1
UNDERSCORE = 4
BLINK = 5
INVERT = 7
CONCEAL = 8
FG_BLACK = 30
FG_RED = 31
FG_GREEN = 32
FG_YELLOW = 33
FG_BLUE = 34
FG_MAGENTA = 35
FG_CYAN = 36
FG_WHITE = 37
BG_BLACK = 40
BG_RED = 41
BG_GREEN = 42
    BG_YELLOW = 43
BG_BLUE = 44
BG_MAGENTA = 45
BG_CYAN = 46
BG_WHITE = 47
@staticmethod
def encode(*attrs):
return ''.join(['\033[', ';'.join(str(a) for a in attrs), 'm'])
@staticmethod
def wrap(text, attrs=None):
if not attrs:
attrs = [Style.RESET]
start = Style.encode(*attrs)
end = Style.encode(Style.RESET)
return ''.join([start, str(text), end])
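# For reference: Style.wrap('hi', [Style.FG_RED, Style.BOLD]) returns
# '\033[31;1mhi\033[0m', i.e. the text bracketed by an SGR colour escape and a
# reset so that later terminal output is unaffected.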
class Move(object):
superior = None
inferior = None
def __repr__(self):
return Style.wrap(self.__class__.__name__,
[Style.BG_WHITE, Style.FG_BLACK, Style.BOLD])
def __cmp__(self, other):
if isinstance(other, self.superior):
return -1
elif isinstance(other, self.inferior):
return 1
elif isinstance(other, self.__class__):
return 0
else:
raise TypeError('Can\'t compare {0} with {1}'.format(self, other))
class ROCK(Move):
def __init__(self):
self.superior = PAPER
self.inferior = SCISSORS
class PAPER(Move):
def __init__(self):
self.superior = SCISSORS
self.inferior = ROCK
class SCISSORS(Move):
def __init__(self):
self.superior = ROCK
self.inferior = PAPER
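# With the __cmp__ above (Python 2 ordering via cmp), the three moves form the
# expected cycle: ROCK() < PAPER(), PAPER() < SCISSORS(), SCISSORS() < ROCK().
# Game.play relies on this by simply comparing the two thrown moves.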
class Game(object):
winning_score = 3
def __init__(self, name, lobby):
self.name = name
self.lobby = lobby
self.winner = None
self.players = set()
self.moves = {}
self.reset_score()
def reset_score(self):
self.score = defaultdict(lambda: 0)
def add_player(self, player):
if self.full:
player.prompt('Game is already full')
return None
self.players.add(player)
return True
def other_player(self, player):
diff = self.players - {player}
(result,) = diff
return result
def try_run(self):
if self.full:
self.prompt_move()
return True
else:
return False
def prompt_move(self, player=None):
if player is not None:
            ps = [player]
else:
ps = self.players
for p in ps:
p.prompt('\nMake your move!')
@property
def full(self):
return len(self.players) >= 2
@property
def gameover(self):
return self.winner is not None
def end_game(self):
for p in self.players:
p.game = None
p.prompt()
del self.lobby.games[self.name]
def sendall(self, msg):
for p in self.players:
p.send(msg)
def show_score(self):
return '\n'.join('{0}: {1}'.format(p, self.score[p])
for p in self.players)
def play(self, player, move):
other = self.other_player(player)
if player not in self.players:
player.send('You aren\'t a player in this game')
return
if self.gameover:
player.send('Player {} already won'.format(self.winner))
if not self.full:
player.send('Wait until the game is full before playing...')
move_upper = move.upper()
if move_upper in ('R', 'ROCK'):
self.moves[player] = ROCK()
elif move_upper in ('P', 'PAPER'):
self.moves[player] = PAPER()
elif move_upper in ('S', 'SCISSORS'):
self.moves[player] = SCISSORS()
else:
player.prompt(''.join([
'Invalid move: "{}"'.format(move),
' Choose one of: (R)OCK, (P)APER or (S)CISSORS'
]))
return
if len(self.moves) == 2:
self.sendall('\n')
_players = list(self.players)
for p1, p2 in zip(_players, reversed(_players)):
p1.send('{0} threw {1}'.format(p2, self.moves[p2]))
winner = None
if self.moves[player] > self.moves[other]:
winner = player
elif self.moves[other] > self.moves[player]:
winner = other
if winner is not None:
self.score[winner] += 1
if self.score[winner] >= self.winning_score:
self.winner = winner
self.sendall('\n'.join([
'Player {} wins the game!'.format(winner),
self.show_score(),
]))
else:
self.sendall('\n'.join([
'Player {} wins the round!'.format(winner),
self.show_score(),
]))
else:
self.sendall('Tie')
self.moves = {}
if self.gameover:
self.end_game()
else:
self.prompt_move()
else:
player.send('Waiting for other player to play...')
def __repr__(self):
s = [Style.wrap(self.name, [Style.FG_GREEN])]
if self.full:
s.append(Style.wrap('(FULL)', [Style.FG_RED, Style.BOLD]))
if len(self.players):
s.append('with')
if len(self.players) == 2:
s.append('{0}, {1}'.format(*list(self.players)))
else:
(p,) = self.players
s.append(str(p))
return ' '.join(s)
class Lobby(object):
def __init__(self):
self.games = OrderedDict()
def new_game(self, name):
if name in self.games:
return 'Name "{}" taken'.format(name)
game = Game(name, lobby=self)
self.games[name] = game
return game
def list_games(self):
ls = '\n'.join(' {0}'.format(g) for _, g in self.games.iteritems())
if not ls:
ls = 'No games'
return ls
def get_game(self, name):
return self.games.get(name)
def help(self):
return '\n'.join([
'Commands:',
' ?: show this text',
' c <name>: create new game with <name>',
' j <name>: join existing game with <name>',
' l: list games',
' who: list players',
])
class Player(object):
def __init__(self, sock, lobby):
self.socket = sock
self.lobby = lobby
self.name = None
self.game = None
def prompt(self, txt=''):
if txt and not txt.endswith('\n'):
txt += '\n'
game_prompt = ''
if self.game:
if self.game.full:
game_prompt = 'playing {0} against {1} '.format(
Style.wrap(self.game.name, [Style.FG_GREEN]),
self.game.other_player(self))
else:
return
txt += '{}> '.format(game_prompt)
self.socket.send(txt)
def prompt_name(self):
self.socket.send('Please enter your name: ')
def send(self, txt):
if txt and not txt.endswith('\n'):
txt += '\n'
self.socket.send(txt)
def create_game(self, name):
game = self.lobby.new_game(name)
if isinstance(game, basestring):
msg = game
self.prompt(msg)
return
self.join_game(name)
return game
def join_game(self, name):
game = self.lobby.get_game(name)
if not game:
self.prompt('No game "{}"'.format(name))
elif game.full:
self.prompt('Game is full')
else:
game.add_player(self)
self.game = game
if not self.game.try_run():
self.send('Waiting for other player...')
def play(self, move):
self.game.play(self, move)
def fileno(self):
return self.socket.fileno()
def __repr__(self):
return Style.wrap(self.name, [Style.FG_BLUE])
def main(host, port):
"""Start a rock-paper-scissors server"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print 'Binding to {0}:{1}'.format(host, port)
server.bind((host, int(port)))
server.listen(1)
lobby = Lobby()
read_list = [server]
write_list = []
notifications = []
def disconnect(sock):
read_list.remove(sock)
write_list.remove(sock)
while True:
readable, writable, _ = select.select(read_list, write_list, [])
notify = '\n'.join(notifications)
notifications = []
if notify:
for sock in writable:
if isinstance(sock, Player):
sock.send(notify)
sock.prompt()
for sock in readable:
if sock is server:
new_client, _ = server.accept()
player = Player(new_client, lobby)
read_list.append(player)
write_list.append(player)
player.prompt_name()
elif isinstance(sock, Player):
player = sock
if notify:
player.send(notify)
data = player.socket.recv(1024)
if not data:
disconnect(player)
continue
else:
data = data.strip()
if player.game:
player.play(data)
else:
if not player.name:
if data:
player.name = data
player.prompt(Style.wrap(
'Welcome to Rock Paper Scissors! Type "?" '
'for help',
[Style.FG_MAGENTA]))
else:
player.prompt_name()
continue
if data == '?':
player.prompt(lobby.help())
elif data == 'l':
player.prompt(lobby.list_games())
elif data == 'who':
players = []
for p in read_list:
if isinstance(p, Player):
player_text = [' ', str(p)]
if p.game:
player_text.append(' in ')
player_text.append(Style.wrap(
p.game.name, [Style.FG_GREEN]))
players.append(''.join(player_text))
player.prompt('\n'.join(players))
elif data.startswith('c '):
name = data[2:]
game = player.create_game(name)
notifications.append('{0} created game {1}'.format(
player, Style.wrap(game.name, [Style.FG_GREEN])))
elif data.startswith('j '):
name = data[2:]
player.join_game(name)
notifications.append('{0} joined game {1}'.format(
player, Style.wrap(name, [Style.FG_GREEN])))
else:
player.prompt('Unrecognized command: {}'.format(data))
else:
disconnect(sock)
if __name__ == '__main__':
main(*sys.argv[1:])
| bsd-3-clause |
sol/aeson | tests/JSONTestSuite/parsers/test_json-jq.py | 5 | 1169 | #!/usr/bin/env python
import os
import subprocess
import sys
jq_paths = ["/usr/local/bin/jq", "/Users/nst/bin/jq"]
dir_path = "/Users/nst/Projects/dropbox/JSON/test_cases/"
existing_jq_paths = [p for p in jq_paths if os.path.exists(p)]
if len(existing_jq_paths) == 0:
print "-- cannot find jq"
sys.exit(1)
jq = existing_jq_paths[0]
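# JSONTestSuite convention: file names starting with 'y_' must parse, 'n_' must
# be rejected, and 'i_' are implementation-defined; the checks below report any
# parser behaviour that deviates from (or merely exercises) that expectation.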
for root, dirs, files in os.walk(dir_path):
json_files = (f for f in files if f.endswith(".json"))
for filename in json_files:
path = os.path.join(root, filename)
print "*"*80
print path
parsing_success = subprocess.call([jq, ".", path]) == 0
if filename.startswith("y_") and parsing_success == False:
print "jq\tSHOULD_HAVE_PASSED\t%s" % (filename)
elif filename.startswith("n_") and parsing_success == True:
print "jq\tSHOULD_HAVE_FAILED\t%s" % (filename)
elif filename.startswith("i_") and parsing_success == True:
print "jq\tIMPLEMENTATION_PASS\t%s" % (filename)
elif filename.startswith("i_") and parsing_success == False:
print "jq\tIMPLEMENTATION_FAIL\t%s" % (filename)
| bsd-3-clause |
cxxgtxy/tensorflow | tensorflow/contrib/data/python/kernel_tests/sequence_dataset_op_test.py | 6 | 8032 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SequenceDatasetTest(test.TestCase):
def testRepeatTensorDataset(self):
"""Test a dataset that repeats its input multiple times."""
components = [np.array(1), np.array([1, 2, 3]), np.array(37.0)]
# This placeholder can be fed when dataset-definition subgraph
# runs (i.e. `init_op` below) to configure the number of
# repetitions used in a particular iterator.
count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensors(components)
.repeat(count_placeholder).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test a finite repetition.
sess.run(init_op, feed_dict={count_placeholder: 3})
for _ in range(3):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test a different finite repetition.
sess.run(init_op, feed_dict={count_placeholder: 7})
for _ in range(7):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test an empty repetition.
sess.run(init_op, feed_dict={count_placeholder: 0})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test an infinite repetition.
# NOTE(mrry): There's not a good way to test that the sequence
# actually is infinite.
sess.run(init_op, feed_dict={count_placeholder: -1})
for _ in range(17):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
def testTakeTensorDataset(self):
components = [np.arange(10)]
count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.take(count_placeholder).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Take fewer than input size
sess.run(init_op, feed_dict={count_placeholder: 4})
for i in range(4):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Take more than input size
sess.run(init_op, feed_dict={count_placeholder: 25})
for i in range(10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Take all of input
sess.run(init_op, feed_dict={count_placeholder: -1})
for i in range(10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Take nothing
sess.run(init_op, feed_dict={count_placeholder: 0})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSkipTensorDataset(self):
components = [np.arange(10)]
count_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensor_slices(components)
.skip(count_placeholder).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Skip fewer than input size, we should skip
# the first 4 elements and then read the rest.
sess.run(init_op, feed_dict={count_placeholder: 4})
for i in range(4, 10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Skip more than input size: get nothing.
sess.run(init_op, feed_dict={count_placeholder: 25})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Skip exactly input size.
sess.run(init_op, feed_dict={count_placeholder: 10})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Set -1 for 'count': skip the entire dataset.
sess.run(init_op, feed_dict={count_placeholder: -1})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Skip nothing
sess.run(init_op, feed_dict={count_placeholder: 0})
for i in range(0, 10):
results = sess.run(get_next)
self.assertAllEqual(results, components[0][i:i+1])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRepeatRepeatTensorDataset(self):
"""Test the composition of repeat datasets."""
components = [np.array(1), np.array([1, 2, 3]), np.array(37.0)]
inner_count = array_ops.placeholder(dtypes.int64, shape=[])
outer_count = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (dataset_ops.Dataset.from_tensors(components).repeat(inner_count)
.repeat(outer_count).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
sess.run(init_op, feed_dict={inner_count: 7, outer_count: 14})
for _ in range(7 * 14):
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRepeatEmptyDataset(self):
"""Test that repeating an empty dataset does not hang."""
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10)
.repeat(-1).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaisesRegexp(
errors.OutOfRangeError,
"Attempted to repeat an empty dataset infinitely."):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| apache-2.0 |
wavelets/zipline | zipline/utils/test_utils.py | 5 | 3103 | from contextlib import contextmanager
from logbook import FileHandler
from zipline.finance.blotter import ORDER_STATUS
from six import itervalues
import pandas as pd
def to_utc(time_str):
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def setup_logger(test, path='test.log'):
test.log_handler = FileHandler(path)
test.log_handler.push_application()
def teardown_logger(test):
test.log_handler.pop_application()
test.log_handler.close()
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if 'expected_transactions' in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.zipline_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.zipline_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
class ExceptionSource(object):
def __init__(self):
pass
def get_hash(self):
return "ExceptionSource"
def __iter__(self):
return self
def next(self):
5 / 0
def __next__(self):
5 / 0
class ExceptionTransform(object):
def __init__(self):
self.window_length = 1
pass
def get_hash(self):
return "ExceptionTransform"
def update(self, event):
assert False, "An assertion message"
@contextmanager
def nullctx():
"""
Null context manager. Useful for conditionally adding a contextmanager in
a single line, e.g.:
with SomeContextManager() if some_expr else nullcontext:
do_stuff()
"""
yield
| apache-2.0 |
steebchen/youtube-dl | youtube_dl/extractor/sohu.py | 50 | 6911 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
int_or_none,
try_get,
)
class SohuIE(InfoExtractor):
_VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
# Sohu videos give different MD5 sums on Travis CI and my machine
_TESTS = [{
'note': 'This video is available only in Mainland China',
'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
'info_dict': {
'id': '382479172',
'ext': 'mp4',
'title': 'MV:Far East Movement《The Illest》',
},
'skip': 'On available in China',
}, {
'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
'info_dict': {
'id': '409385080',
'ext': 'mp4',
'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
}
}, {
'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
'info_dict': {
'id': '78693464',
'ext': 'mp4',
'title': '【爱范品】第31期:MWC见不到的奇葩手机',
}
}, {
'note': 'Multipart video',
'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
'info_dict': {
'id': '78910339',
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
},
'playlist': [{
'info_dict': {
'id': '78910339_part1',
'ext': 'mp4',
'duration': 294,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}, {
'info_dict': {
'id': '78910339_part2',
'ext': 'mp4',
'duration': 300,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}, {
'info_dict': {
'id': '78910339_part3',
'ext': 'mp4',
'duration': 150,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}]
}, {
'note': 'Video with title containing dash',
'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
'info_dict': {
'id': '78932792',
'ext': 'mp4',
'title': 'youtube-dl testing video',
},
'params': {
'skip_download': True
}
}]
def _real_extract(self, url):
def _fetch_data(vid_id, mytv=False):
if mytv:
base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
else:
base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='
return self._download_json(
base_data_url + vid_id, video_id,
'Downloading JSON data for %s' % vid_id,
headers=self.geo_verification_headers())
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
mytv = mobj.group('mytv') is not None
webpage = self._download_webpage(url, video_id)
title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage))
vid = self._html_search_regex(
r'var vid ?= ?["\'](\d+)["\']',
webpage, 'video path')
vid_data = _fetch_data(vid, mytv)
if vid_data['play'] != 1:
if vid_data.get('status') == 12:
raise ExtractorError(
'%s said: There\'s something wrong in the video.' % self.IE_NAME,
expected=True)
else:
self.raise_geo_restricted(
'%s said: The video is only licensed to users in Mainland China.' % self.IE_NAME)
formats_json = {}
for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
vid_id = vid_data['data'].get('%sVid' % format_id)
if not vid_id:
continue
vid_id = compat_str(vid_id)
formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)
part_count = vid_data['data']['totalBlocks']
playlist = []
for i in range(part_count):
formats = []
for format_id, format_data in formats_json.items():
allot = format_data['allot']
data = format_data['data']
clips_url = data['clipsURL']
su = data['su']
video_url = 'newflv.sohu.ccgslb.net'
cdnId = None
retries = 0
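                # The API may first return the placeholder host
                # 'newflv.sohu.ccgslb.net'; keep re-querying (feeding back the
                # returned CDN id via 'idc') until a real mirror URL is issued,
                # bounded by the retries counter below.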
while 'newflv.sohu.ccgslb.net' in video_url:
params = {
'prot': 9,
'file': clips_url[i],
'new': su[i],
'prod': 'flash',
'rb': 1,
}
if cdnId is not None:
params['idc'] = cdnId
download_note = 'Downloading %s video URL part %d of %d' % (
format_id, i + 1, part_count)
if retries > 0:
download_note += ' (retry #%d)' % retries
part_info = self._parse_json(self._download_webpage(
'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
video_id, download_note), video_id)
video_url = part_info['url']
cdnId = part_info.get('nid')
retries += 1
if retries > 5:
raise ExtractorError('Failed to get video URL')
formats.append({
'url': video_url,
'format_id': format_id,
'filesize': int_or_none(
try_get(data, lambda x: x['clipsBytes'][i])),
'width': int_or_none(data.get('width')),
'height': int_or_none(data.get('height')),
'fps': int_or_none(data.get('fps')),
})
self._sort_formats(formats)
playlist.append({
'id': '%s_part%d' % (video_id, i + 1),
'title': title,
'duration': vid_data['data']['clipsDuration'][i],
'formats': formats,
})
if len(playlist) == 1:
info = playlist[0]
info['id'] = video_id
else:
info = {
'_type': 'multi_video',
'entries': playlist,
'id': video_id,
'title': title,
}
return info
| unlicense |
brettgoldstein3/brettgoldstein-site | lib/flask/app.py | 345 | 76786 | # -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package
from . import json
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# a lock used for logger initialization
_logger_lock = Lock()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
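# This helper lets timedelta-valued settings (e.g. PERMANENT_SESSION_LIFETIME)
# be configured either as a timedelta or as a plain number of seconds:
# _make_timedelta(3600) == timedelta(hours=1).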
def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f)
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an `__init__.py` file inside) or a standard module (just a `.py` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the `__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea what
belongs to your application. This name is used to find resources
on the file system, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in `yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to `True` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on expected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is not application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
# Backwards compatibility support
def _get_request_globals_class(self):
return self.app_ctx_globals_class
def _set_request_globals_class(self, value):
from warnings import warn
warn(DeprecationWarning('request_globals_class attribute is now '
'called app_ctx_globals_class'))
self.app_ctx_globals_class = value
request_globals_class = property(_get_request_globals_class,
_set_request_globals_class)
del _get_request_globals_class, _set_request_globals_class
#: The debug flag. Set this to `True` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the `DEBUG`
#: configuration key. Defaults to `False`.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to `True` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: `TESTING` configuration key. Defaults to `False`.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: `SECRET_KEY` configuration key. Defaults to `None`.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: `USE_X_SENDFILE` configuration key. Defaults to `False`.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: Enable the deprecated module support? This is active by default
#: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules
#: will be removed in favor of Blueprints.
enable_modules = True
#: The logging format used for the debug logger. This is only used when
#: the application is in debug mode, otherwise the attached logging
#: handler does the formatting.
#:
#: .. versionadded:: 0.3
debug_log_format = (
'-' * 80 + '\n' +
'%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80
)
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': False,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
})
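# These defaults are copied into ``app.config`` by :meth:`make_config`. A
# typical way to override several of them at once (the values below are
# purely illustrative)::
#
#     app = Flask(__name__)
#     app.config.update(
#         DEBUG=True,
#         SECRET_KEY='development key',
#         MAX_CONTENT_LENGTH=16 * 1024 * 1024,
#     )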
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client class that is used when `test_client` is called.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
def __init__(self, import_name, static_path=None, static_url_path=None,
static_folder='static', template_folder='templates',
instance_path=None, instance_relative_config=False):
_PackageBoundObject.__init__(self, import_name,
template_folder=template_folder)
if static_path is not None:
from warnings import warn
warn(DeprecationWarning('static_path is now called '
'static_url_path'), stacklevel=2)
static_url_path = static_path
if static_url_path is not None:
self.static_url_path = static_url_path
if static_folder is not None:
self.static_folder = static_folder
if instance_path is None:
instance_path = self.auto_find_instance_path()
elif not os.path.isabs(instance_path):
raise ValueError('If an instance path is provided it must be '
'absolute. A relative path was given instead.')
#: Holds the path to the instance folder.
#:
#: .. versionadded:: 0.8
self.instance_path = instance_path
#: The configuration dictionary as :class:`Config`. This behaves
#: exactly like a regular dictionary but supports additional methods
#: to load a config from files.
self.config = self.make_config(instance_relative_config)
# Prepare the deferred setup of the logger.
self._logger = None
self.logger_name = self.import_name
#: A dictionary of all view functions registered. The keys will
#: be function names which are also used to generate URLs and
#: the values are the function objects themselves.
#: To register a view function, use the :meth:`route` decorator.
self.view_functions = {}
# support for the now deprecated `error_handlers` attribute. The
# :attr:`error_handler_spec` shall be used now.
self._error_handlers = {}
#: A dictionary of all registered error handlers. The key is `None`
#: for error handlers active on the application, otherwise the key is
#: the name of the blueprint. Each key points to another dictionary
#: where the key is the status code of the HTTP exception. The
#: special key `None` points to a list of tuples where the first item
#: is the class for the instance check and the second the error handler
#: function.
#:
#: To register an error handler, use the :meth:`errorhandler`
#: decorator.
self.error_handler_spec = {None: self._error_handlers}
#: A list of functions that are called when :meth:`url_for` raises a
#: :exc:`~werkzeug.routing.BuildError`. Each function registered here
#: is called with `error`, `endpoint` and `values`. If a function
#: returns `None` or raises a `BuildError` the next function is
#: tried.
#:
#: .. versionadded:: 0.9
self.url_build_error_handlers = []
#: A dictionary with lists of functions that should be called at the
#: beginning of the request. The key of the dictionary is the name of
#: the blueprint this function is active for, `None` for all requests.
#: This can for example be used to open database connections or to
#: get hold of the currently logged in user. To register a
#: function here, use the :meth:`before_request` decorator.
self.before_request_funcs = {}
#: A list of functions that should be called at the beginning of the
#: first request to this instance. To register a function here, use
#: the :meth:`before_first_request` decorator.
#:
#: .. versionadded:: 0.8
self.before_first_request_funcs = []
#: A dictionary with lists of functions that should be called after
#: each request. The key of the dictionary is the name of the blueprint
#: this function is active for, `None` for all requests. This can for
#: example be used to open database connections or get hold of the
#: currently logged in user. To register a function here, use the
#: :meth:`after_request` decorator.
self.after_request_funcs = {}
#: A dictionary with lists of functions that are called after
#: each request, even if an exception has occurred. The key of the
#: dictionary is the name of the blueprint this function is active for,
#: `None` for all requests. These functions are not allowed to modify
#: the request, and their return values are ignored. If an exception
#: occurred while processing the request, it gets passed to each
#: teardown_request function. To register a function here, use the
#: :meth:`teardown_request` decorator.
#:
#: .. versionadded:: 0.7
self.teardown_request_funcs = {}
#: A list of functions that are called when the application context
#: is destroyed. Since the application context is also torn down
#: if the request ends this is the place to store code that disconnects
#: from databases.
#:
#: .. versionadded:: 0.9
self.teardown_appcontext_funcs = []
#: A dictionary with lists of functions that can be used as URL
#: value processor functions. Whenever a URL is built these functions
#: are called to modify the dictionary of values in place. The key
#: `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#:
#: .. versionadded:: 0.7
self.url_value_preprocessors = {}
#: A dictionary with lists of functions that can be used as URL value
#: preprocessors. The key `None` here is used for application wide
#: callbacks, otherwise the key is the name of the blueprint.
#: Each of these functions has the chance to modify the dictionary
#: of URL values before they are used as the keyword arguments of the
#: view function. For each function registered this one should also
#: provide a :meth:`url_defaults` function that adds the parameters
#: automatically again that were removed that way.
#:
#: .. versionadded:: 0.7
self.url_default_functions = {}
#: A dictionary with list of functions that are called without argument
#: to populate the template context. The key of the dictionary is the
#: name of the blueprint this function is active for, `None` for all
#: requests. Each returns a dictionary that the template context is
#: updated with. To register a function here, use the
#: :meth:`context_processor` decorator.
self.template_context_processors = {
None: [_default_template_ctx_processor]
}
#: all the attached blueprints in a dictionary by name. Blueprints
#: can be attached multiple times so this dictionary does not tell
#: you how often they got attached.
#:
#: .. versionadded:: 0.7
self.blueprints = {}
#: a place where extensions can store application specific state. For
#: example this is where an extension could store database engines and
#: similar things. For backwards compatibility extensions should register
#: themselves like this::
#:
#: if not hasattr(app, 'extensions'):
#: app.extensions = {}
#: app.extensions['extensionname'] = SomeObject()
#:
#: The key must match the name of the `flaskext` module. For example in
#: case of a "Flask-Foo" extension in `flaskext.foo`, the key would be
#: ``'foo'``.
#:
#: .. versionadded:: 0.7
self.extensions = {}
#: The :class:`~werkzeug.routing.Map` for this instance. You can use
#: this to change the routing converters after the class was created
#: but before any routes are connected. Example::
#:
#: from werkzeug.routing import BaseConverter
#:
#: class ListConverter(BaseConverter):
#: def to_python(self, value):
#: return value.split(',')
#: def to_url(self, values):
#: return ','.join(BaseConverter.to_url(value)
#: for value in values)
#:
#: app = Flask(__name__)
#: app.url_map.converters['list'] = ListConverter
self.url_map = Map()
# tracks internally if the application already handled at least one
# request.
self._got_first_request = False
self._before_request_lock = Lock()
# register the static folder for the application. Do that even
# if the folder does not exist. First of all it might be created
# while the server is running (usually happens during development)
# but also because google appengine stores static files somewhere
# else when mapped with the .yml file.
if self.has_static_folder:
self.add_url_rule(self.static_url_path + '/<path:filename>',
endpoint='static',
view_func=self.send_static_file)
def _get_error_handlers(self):
from warnings import warn
warn(DeprecationWarning('error_handlers is deprecated, use the '
'new error_handler_spec attribute instead.'), stacklevel=1)
return self._error_handlers
def _set_error_handlers(self, value):
self._error_handlers = value
self.error_handler_spec[None] = value
error_handlers = property(_get_error_handlers, _set_error_handlers)
del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
"""The name of the application. This is usually the import name
with the difference that it's guessed from the run file if the
import name is '__main__'. This name is used as a display name when
Flask needs the name of the application. It can be set and overridden
to change the value.
.. versionadded:: 0.8
"""
if self.import_name == '__main__':
fn = getattr(sys.modules['__main__'], '__file__', None)
if fn is None:
return '__main__'
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
@property
def propagate_exceptions(self):
"""Returns the value of the `PROPAGATE_EXCEPTIONS` configuration
value in case it's set, otherwise a sensible default is returned.
.. versionadded:: 0.7
"""
rv = self.config['PROPAGATE_EXCEPTIONS']
if rv is not None:
return rv
return self.testing or self.debug
@property
def preserve_context_on_exception(self):
"""Returns the value of the `PRESERVE_CONTEXT_ON_EXCEPTION`
configuration value in case it's set, otherwise a sensible default
is returned.
.. versionadded:: 0.7
"""
rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
if rv is not None:
return rv
return self.debug
@property
def logger(self):
"""A :class:`logging.Logger` object for this application. The
default configuration is to log to stderr if the application is
in debug mode. This logger can be used to (surprise) log messages.
Here are some examples::
app.logger.debug('A value for debugging')
app.logger.warning('A warning occurred (%d apples)', 42)
app.logger.error('An error occurred')
.. versionadded:: 0.3
"""
if self._logger and self._logger.name == self.logger_name:
return self._logger
with _logger_lock:
if self._logger and self._logger.name == self.logger_name:
return self._logger
from flask.logging import create_logger
self._logger = rv = create_logger(self)
return rv
@locked_cached_property
def jinja_env(self):
"""The Jinja2 environment used to load templates."""
return self.create_jinja_environment()
@property
def got_first_request(self):
"""This attribute is set to `True` if the application started
handling the first request.
.. versionadded:: 0.8
"""
return self._got_first_request
def make_config(self, instance_relative=False):
"""Used to create the config attribute by the Flask constructor.
The `instance_relative` parameter is passed in from the constructor
of Flask (there named `instance_relative_config`) and indicates if
the config should be relative to the instance path or the root path
of the application.
.. versionadded:: 0.8
"""
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
return Config(root_path, self.default_config)
def auto_find_instance_path(self):
"""Tries to locate the instance path if it was not provided to the
constructor of the application class. It will basically calculate
the path to a folder named ``instance`` next to your main file or
the package.
.. versionadded:: 0.8
"""
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, 'instance')
return os.path.join(prefix, 'var', self.name + '-instance')
def open_instance_resource(self, resource, mode='rb'):
"""Opens a resource from the application's instance folder
(:attr:`instance_path`). Otherwise works like
:meth:`open_resource`. Instance resources can also be opened for
writing.
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
return open(os.path.join(self.instance_path, resource), mode)
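# Hypothetical usage sketch (the file name is made up): read a configuration
# file that lives in the instance folder rather than inside the package::
#
#     with app.open_instance_resource('application.cfg') as f:
#         raw_config = f.read()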
def create_jinja_environment(self):
"""Creates the Jinja2 environment based on :attr:`jinja_options`
and :meth:`select_jinja_autoescape`. Since 0.7 this also adds
the Jinja2 globals and filters after initialization. Override
this function to customize the behavior.
.. versionadded:: 0.5
"""
options = dict(self.jinja_options)
if 'autoescape' not in options:
options['autoescape'] = self.select_jinja_autoescape
rv = Environment(self, **options)
rv.globals.update(
url_for=url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g
)
rv.filters['tojson'] = json.tojson_filter
return rv
def create_global_jinja_loader(self):
"""Creates the loader for the Jinja2 environment. Can be used to
override just the loader while keeping the rest unchanged. It's
discouraged to override this function. Instead one should override
the :meth:`jinja_loader` function.
The global loader dispatches between the loaders of the application
and the individual blueprints.
.. versionadded:: 0.7
"""
return DispatchingJinjaLoader(self)
def init_jinja_globals(self):
"""Deprecated. Used to initialize the Jinja2 globals.
.. versionadded:: 0.5
.. versionchanged:: 0.7
This method is deprecated with 0.7. Override
:meth:`create_jinja_environment` instead.
"""
def select_jinja_autoescape(self, filename):
"""Returns `True` if autoescaping should be active for the given
template name.
.. versionadded:: 0.5
"""
if filename is None:
return False
return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
"""Update the template context with some commonly used variables.
This injects request, session, config and g into the template
context as well as everything template context processors want
to inject. Note that as of Flask 0.6, the original values
in the context will not be overridden if a context processor
decides to return a value with the same key.
:param context: the context as a dictionary that is updated in place
to add extra variables.
"""
funcs = self.template_context_processors[None]
reqctx = _request_ctx_stack.top
if reqctx is not None:
bp = reqctx.request.blueprint
if bp is not None and bp in self.template_context_processors:
funcs = chain(funcs, self.template_context_processors[bp])
orig_ctx = context.copy()
for func in funcs:
context.update(func())
# make sure the original values win. This makes it possible to
# easier add new variables in context processors without breaking
# existing views.
context.update(orig_ctx)
def run(self, host=None, port=None, debug=None, **options):
"""Runs the application on a local development server. If the
:attr:`debug` flag is set the server will automatically reload
for code changes and show a debugger in case an exception happened.
If you want to run the application in debug mode, but disable the
code execution on the interactive debugger, you can pass
``use_evalex=False`` as parameter. This will keep the debugger's
traceback screen active, but disable code execution.
.. admonition:: Keep in Mind
Flask will suppress any server error with a generic error page
unless it is in debug mode. As such to enable just the
interactive debugger without the code reloading, you have to
invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
Setting ``use_debugger`` to `True` without being in debug mode
won't catch any exceptions because there won't be any to
catch.
.. versionchanged:: 0.10
The default port is now picked from the ``SERVER_NAME`` variable.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000`` or the
port defined in the ``SERVER_NAME`` config variable if
present.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information.
"""
from werkzeug.serving import run_simple
if host is None:
host = '127.0.0.1'
if port is None:
server_name = self.config['SERVER_NAME']
if server_name and ':' in server_name:
port = int(server_name.rsplit(':', 1)[1])
else:
port = 5000
if debug is not None:
self.debug = bool(debug)
options.setdefault('use_reloader', self.debug)
options.setdefault('use_debugger', self.debug)
try:
run_simple(host, port, self, **options)
finally:
# reset the first request information if the development server
# shut down normally. This makes it possible to restart the server
# without the reloader from an interactive shell.
self._got_first_request = False
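# A common invocation sketch for local development; the host and port values
# here are only an illustration::
#
#     if __name__ == '__main__':
#         app.run(host='0.0.0.0', port=5000, debug=True)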
def test_client(self, use_cookies=True):
"""Creates a test client for this application. For information
about unit testing head over to :ref:`testing`.
Note that if you are testing for assertions or exceptions in your
application code, you must set ``app.testing = True`` in order for the
exceptions to propagate to the test client. Otherwise, the exception
will be handled by the application (not visible to the test client) and
the only indication of an AssertionError or other exception will be a
500 status code response to the test client. See the :attr:`testing`
attribute. For example::
app.testing = True
client = app.test_client()
The test client can be used in a `with` block to defer the closing down
of the context until the end of the `with` block. This is useful if
you want to access the context locals for testing::
with app.test_client() as c:
rv = c.get('/?vodka=42')
assert request.args['vodka'] == '42'
See :class:`~flask.testing.FlaskClient` for more information.
.. versionchanged:: 0.4
added support for `with` block usage for the client.
.. versionadded:: 0.7
The `use_cookies` parameter was added as well as the ability
to override the client to be used by setting the
:attr:`test_client_class` attribute.
"""
cls = self.test_client_class
if cls is None:
from flask.testing import FlaskClient as cls
return cls(self, self.response_class, use_cookies=use_cookies)
def open_session(self, request):
"""Creates or opens a new session. Default implementation stores all
session data in a signed cookie. This requires that the
:attr:`secret_key` is set. Instead of overriding this method
we recommend replacing the :class:`session_interface`.
:param request: an instance of :attr:`request_class`.
"""
return self.session_interface.open_session(self, request)
def save_session(self, session, response):
"""Saves the session if it needs updates. For the default
implementation, check :meth:`open_session`. Instead of overriding this
method we recommend replacing the :class:`session_interface`.
:param session: the session to be saved (a
:class:`~werkzeug.contrib.securecookie.SecureCookie`
object)
:param response: an instance of :attr:`response_class`
"""
return self.session_interface.save_session(self, session, response)
def make_null_session(self):
"""Creates a new instance of a missing session. Instead of overriding
this method we recommend replacing the :class:`session_interface`.
.. versionadded:: 0.7
"""
return self.session_interface.make_null_session(self)
def register_module(self, module, **options):
"""Registers a module with this application. The keyword argument
of this function are the same as the ones for the constructor of the
:class:`Module` class and will override the values of the module if
provided.
.. versionchanged:: 0.7
The module system was deprecated in favor for the blueprint
system.
"""
assert blueprint_is_module(module), 'register_module requires ' \
'actual module objects. Please upgrade to blueprints though.'
if not self.enable_modules:
raise RuntimeError('Module support was disabled but code '
'attempted to register a module named %r' % module)
else:
from warnings import warn
warn(DeprecationWarning('Modules are deprecated. Upgrade to '
'using blueprints. Have a look into the documentation for '
'more information. If this module was registered by a '
'Flask-Extension upgrade the extension or contact the author '
'of that extension instead. (Registered %r)' % module),
stacklevel=2)
self.register_blueprint(module, **options)
@setupmethod
def register_blueprint(self, blueprint, **options):
"""Registers a blueprint on the application.
.. versionadded:: 0.7
"""
first_registration = False
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint\'s name collision occurred between %r and ' \
'%r. Both share the same name "%s". Blueprints that ' \
'are created on the fly need unique names.' % \
(blueprint, self.blueprints[blueprint.name], blueprint.name)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, options, first_registration)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Connects a URL rule. Works exactly like the :meth:`route`
decorator. If a view_func is provided it will be registered with the
endpoint.
Basically this example::
@app.route('/')
def index():
pass
Is equivalent to the following::
def index():
pass
app.add_url_rule('/', 'index', index)
If the view_func is not provided you will need to connect the endpoint
to a view function like so::
app.view_functions['index'] = index
Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
to customize the behavior via subclassing you only need to change
this method.
For more information refer to :ref:`url-route-registrations`.
.. versionchanged:: 0.2
`view_func` parameter added.
.. versionchanged:: 0.6
`OPTIONS` is added automatically as method.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param view_func: the function to call when serving a request to the
provided endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
options['endpoint'] = endpoint
methods = options.pop('methods', None)
# if the methods are not given and the view_func object knows its
# methods we can use that instead. If neither exists, we go with
# a tuple of only `GET` as default.
if methods is None:
methods = getattr(view_func, 'methods', None) or ('GET',)
methods = set(methods)
# Methods that should always be added
required_methods = set(getattr(view_func, 'required_methods', ()))
# starting with Flask 0.8 the view_func object can disable and
# force-enable the automatic options handling.
provide_automatic_options = getattr(view_func,
'provide_automatic_options', None)
if provide_automatic_options is None:
if 'OPTIONS' not in methods:
provide_automatic_options = True
required_methods.add('OPTIONS')
else:
provide_automatic_options = False
# Add the required methods now.
methods |= required_methods
# due to a werkzeug bug we need to make sure that the defaults are
# None if they are an empty dictionary. This should not be necessary
# with Werkzeug 0.7
options['defaults'] = options.get('defaults') or None
rule = self.url_rule_class(rule, methods=methods, **options)
rule.provide_automatic_options = provide_automatic_options
self.url_map.add(rule)
if view_func is not None:
old_func = self.view_functions.get(endpoint)
if old_func is not None and old_func is not view_func:
raise AssertionError('View function mapping is overwriting an '
'existing endpoint function: %s' % endpoint)
self.view_functions[endpoint] = view_func
def route(self, rule, **options):
"""A decorator that is used to register a view function for a
given URL rule. This does the same thing as :meth:`add_url_rule`
but is intended for decorator usage::
@app.route('/')
def index():
return 'Hello World'
For more information refer to :ref:`url-route-registrations`.
:param rule: the URL rule as string
:param endpoint: the endpoint for the registered URL rule. Flask
itself assumes the name of the view function as
endpoint
:param options: the options to be forwarded to the underlying
:class:`~werkzeug.routing.Rule` object. A change
to Werkzeug is handling of method options. methods
is a list of methods this rule should be limited
to (`GET`, `POST` etc.). By default a rule
just listens for `GET` (and implicitly `HEAD`).
Starting with Flask 0.6, `OPTIONS` is implicitly
added and handled by the standard request handling.
"""
def decorator(f):
endpoint = options.pop('endpoint', None)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
@setupmethod
def endpoint(self, endpoint):
"""A decorator to register a function as an endpoint.
Example::
@app.endpoint('example.endpoint')
def example():
return "example"
:param endpoint: the name of the endpoint
"""
def decorator(f):
self.view_functions[endpoint] = f
return f
return decorator
@setupmethod
def errorhandler(self, code_or_exception):
"""A decorator that is used to register a function give a given
error code. Example::
@app.errorhandler(404)
def page_not_found(error):
return 'This page does not exist', 404
You can also register handlers for arbitrary exceptions::
@app.errorhandler(DatabaseError)
def special_exception_handler(error):
return 'Database connection failed', 500
You can also register a function as error handler without using
the :meth:`errorhandler` decorator. The following example is
equivalent to the one above::
def page_not_found(error):
return 'This page does not exist', 404
app.error_handler_spec[None][404] = page_not_found
Setting error handlers via assignments to :attr:`error_handler_spec`
however is discouraged as it requires fiddling with nested dictionaries
and the special case for arbitrary exception types.
The first `None` refers to the active blueprint. If the error
handler should be application wide `None` shall be used.
.. versionadded:: 0.7
One can now additionally also register custom exception types
that do not necessarily have to be a subclass of the
:class:`~werkzeug.exceptions.HTTPException` class.
:param code: the code as integer for the handler
"""
def decorator(f):
self._register_error_handler(None, code_or_exception, f)
return f
return decorator
def register_error_handler(self, code_or_exception, f):
"""Alternative error attach function to the :meth:`errorhandler`
decorator that is more straightforward to use for non-decorator
usage.
.. versionadded:: 0.7
"""
self._register_error_handler(None, code_or_exception, f)
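# Non-decorator registration sketch; ``page_not_found`` stands in for a
# handler defined elsewhere::
#
#     def page_not_found(error):
#         return 'This page does not exist', 404
#
#     app.register_error_handler(404, page_not_found)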
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
if isinstance(code_or_exception, HTTPException):
code_or_exception = code_or_exception.code
if isinstance(code_or_exception, integer_types):
assert code_or_exception != 500 or key is None, \
'It is currently not possible to register a 500 internal ' \
'server error on a per-blueprint level.'
self.error_handler_spec.setdefault(key, {})[code_or_exception] = f
else:
self.error_handler_spec.setdefault(key, {}).setdefault(None, []) \
.append((code_or_exception, f))
@setupmethod
def template_filter(self, name=None):
"""A decorator that is used to register custom template filter.
You can specify a name for the filter, otherwise the function
name will be used. Example::
@app.template_filter()
def reverse(s):
return s[::-1]
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_template_filter(self, f, name=None):
"""Register a custom template filter. Works exactly like the
:meth:`template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
self.jinja_env.filters[name or f.__name__] = f
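# Non-decorator counterpart of the example in :meth:`template_filter`;
# ``reverse`` is the same illustrative helper::
#
#     def reverse(s):
#         return s[::-1]
#
#     app.add_template_filter(reverse, name='reverse')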
@setupmethod
def template_test(self, name=None):
"""A decorator that is used to register custom template test.
You can specify a name for the test, otherwise the function
name will be used. Example::
@app.template_test()
def is_prime(n):
if n == 2:
return True
for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
if n % i == 0:
return False
return True
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_template_test(self, f, name=None):
"""Register a custom template test. Works exactly like the
:meth:`template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
self.jinja_env.tests[name or f.__name__] = f
@setupmethod
def template_global(self, name=None):
"""A decorator that is used to register a custom template global function.
You can specify a name for the global function, otherwise the function
name will be used. Example::
@app.template_global()
def double(n):
return 2 * n
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
def decorator(f):
self.add_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_template_global(self, f, name=None):
"""Register a custom template global function. Works exactly like the
:meth:`template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global function, otherwise the
function name will be used.
"""
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def before_request(self, f):
"""Registers a function to run before each request."""
self.before_request_funcs.setdefault(None, []).append(f)
return f
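# Sketch of a typical before-request hook, e.g. opening a database
# connection; ``connect_to_database`` is a placeholder, not a Flask API,
# and ``g`` is assumed to be imported with ``from flask import g``::
#
#     @app.before_request
#     def load_db():
#         g.db = connect_to_database()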
@setupmethod
def before_first_request(self, f):
"""Registers a function to be run before the first request to this
instance of the application.
.. versionadded:: 0.8
"""
self.before_first_request_funcs.append(f)
@setupmethod
def after_request(self, f):
"""Register a function to be run after each request. Your function
must take one parameter, a :attr:`response_class` object and return
a new response object or the same (see :meth:`process_response`).
As of Flask 0.7 this function might not be executed at the end of the
request in case an unhandled exception occurred.
"""
self.after_request_funcs.setdefault(None, []).append(f)
return f
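# Sketch of an after-request hook: it must accept the response object and
# return one, for example to add a header (the header name is arbitrary)::
#
#     @app.after_request
#     def add_header(response):
#         response.headers['X-Served-By'] = 'example'
#         return response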
@setupmethod
def teardown_request(self, f):
"""Register a function to be run at the end of each request,
regardless of whether there was an exception or not. These functions
are executed when the request context is popped, even if no actual
request was performed.
Example::
ctx = app.test_request_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the request context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Generally teardown functions must take every necessary step to avoid
failing. If they do execute code that might fail they will have to
surround the execution of that code with try/except statements and
log any errors that occur.
When a teardown function was called because of an exception it will
be passed an error object.
.. admonition:: Debug Note
In debug mode Flask will not tear down a request on an exception
immediately. Instead it will keep it alive so that the interactive
debugger can still access it. This behavior can be controlled
by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
"""
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_appcontext(self, f):
"""Registers a function to be called when the application context
ends. These functions are typically also called when the request
context is popped.
Example::
ctx = app.app_context()
ctx.push()
...
ctx.pop()
When ``ctx.pop()`` is executed in the above example, the teardown
functions are called just before the app context moves from the
stack of active contexts. This becomes relevant if you are using
such constructs in tests.
Since a request context typically also manages an application
context it would also be called when you pop a request context.
When a teardown function was called because of an exception it will
be passed an error object.
.. versionadded:: 0.9
"""
self.teardown_appcontext_funcs.append(f)
return f
@setupmethod
def context_processor(self, f):
"""Registers a template context processor function."""
self.template_context_processors[None].append(f)
return f
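# Sketch of a context processor that injects a value into every rendered
# template; the ``user`` key is only an example, and ``g`` is assumed to be
# imported with ``from flask import g``::
#
#     @app.context_processor
#     def inject_user():
#         return {'user': getattr(g, 'user', None)}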
@setupmethod
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for all view
functions of the application. It's called before the view functions
are called and can modify the url values provided.
"""
self.url_value_preprocessors.setdefault(None, []).append(f)
return f
@setupmethod
def url_defaults(self, f):
"""Callback function for URL defaults for all view functions of the
application. It's called with the endpoint and values and should
update the values passed in place.
"""
self.url_default_functions.setdefault(None, []).append(f)
return f
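# Sketch of the usual pairing of :meth:`url_value_preprocessor` with
# :meth:`url_defaults`; the ``lang_code`` URL parameter is illustrative and
# ``g`` is assumed to be imported with ``from flask import g``::
#
#     @app.url_value_preprocessor
#     def pull_lang_code(endpoint, values):
#         g.lang_code = values.pop('lang_code', None)
#
#     @app.url_defaults
#     def add_lang_code(endpoint, values):
#         if 'lang_code' not in values and getattr(g, 'lang_code', None):
#             values['lang_code'] = g.lang_code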
def handle_http_exception(self, e):
"""Handles an HTTP exception. By default this will invoke the
registered error handlers and fall back to returning the
exception as response.
.. versionadded:: 0.3
"""
handlers = self.error_handler_spec.get(request.blueprint)
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
if handlers and e.code in handlers:
handler = handlers[e.code]
else:
handler = self.error_handler_spec[None].get(e.code)
if handler is None:
return e
return handler(e)
def trap_http_exception(self, e):
"""Checks if an HTTP exception should be trapped or not. By default
this will return `False` for all exceptions except for a bad request
key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to `True`. It
also returns `True` if ``TRAP_HTTP_EXCEPTIONS`` is set to `True`.
This is called for all HTTP exceptions raised by a view function.
If it returns `True` for any exception the error handler for this
exception is not called and it shows up as a regular exception in the
traceback. This is helpful for debugging implicitly raised HTTP
exceptions.
.. versionadded:: 0.8
"""
if self.config['TRAP_HTTP_EXCEPTIONS']:
return True
if self.config['TRAP_BAD_REQUEST_ERRORS']:
return isinstance(e, BadRequest)
return False
def handle_user_exception(self, e):
"""This method is called whenever an exception occurs that should be
handled. Special cases are
:class:`~werkzeug.exceptions.HTTPException`\s which are forwarded by
this function to the :meth:`handle_http_exception` method. This
function will either return a response value or reraise the
exception with the same traceback.
.. versionadded:: 0.7
"""
exc_type, exc_value, tb = sys.exc_info()
assert exc_value is e
# ensure not to trash sys.exc_info() at that point in case someone
# wants the traceback preserved in handle_http_exception. Of course
# we cannot prevent users from trashing it themselves in a custom
# trap_http_exception method so that's their fault then.
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(e)
blueprint_handlers = ()
handlers = self.error_handler_spec.get(request.blueprint)
if handlers is not None:
blueprint_handlers = handlers.get(None, ())
app_handlers = self.error_handler_spec[None].get(None, ())
for typecheck, handler in chain(blueprint_handlers, app_handlers):
if isinstance(e, typecheck):
return handler(e)
reraise(exc_type, exc_value, tb)
def handle_exception(self, e):
"""Default exception handling that kicks in when an exception
occurs that is not caught. In debug mode the exception will
be re-raised immediately, otherwise it is logged and the handler
for a 500 internal server error is used. If no such handler
exists, a default 500 internal server error message is displayed.
.. versionadded:: 0.3
"""
exc_type, exc_value, tb = sys.exc_info()
got_request_exception.send(self, exception=e)
handler = self.error_handler_spec[None].get(500)
if self.propagate_exceptions:
# if we want to repropagate the exception, we can attempt to
# raise it with the whole traceback in case we can do that
# (the function was actually called from the except part)
# otherwise, we just raise the error again
if exc_value is e:
reraise(exc_type, exc_value, tb)
else:
raise e
self.log_exception((exc_type, exc_value, tb))
if handler is None:
return InternalServerError()
return handler(e)
def log_exception(self, exc_info):
"""Logs an exception. This is called by :meth:`handle_exception`
if debugging is disabled and right before the handler is called.
The default implementation logs the exception as error on the
:attr:`logger`.
.. versionadded:: 0.8
"""
self.logger.error('Exception on %s [%s]' % (
request.path,
request.method
), exc_info=exc_info)
def raise_routing_exception(self, request):
"""Exceptions that are recording during routing are reraised with
this method. During debug we are not reraising redirect requests
for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
a different error instead to help debug situations.
:internal:
"""
if not self.debug \
or not isinstance(request.routing_exception, RequestRedirect) \
or request.method in ('GET', 'HEAD', 'OPTIONS'):
raise request.routing_exception
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def dispatch_request(self):
"""Does the request dispatching. Matches the URL and returns the
return value of the view or error handler. This does not have to
be a response object. In order to convert the return value to a
proper response object, call :func:`make_response`.
.. versionchanged:: 0.7
This no longer does the exception handling, this code was
moved to the new :meth:`full_dispatch_request`.
"""
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
"""Dispatches the request and on top of that performs request
pre and postprocessing as well as HTTP exception catching and
error handling.
.. versionadded:: 0.7
"""
self.try_trigger_before_first_request_functions()
try:
request_started.send(self)
rv = self.preprocess_request()
if rv is None:
rv = self.dispatch_request()
except Exception as e:
rv = self.handle_user_exception(e)
response = self.make_response(rv)
response = self.process_response(response)
request_finished.send(self, response=response)
return response
def try_trigger_before_first_request_functions(self):
"""Called before each request and will ensure that it triggers
the :attr:`before_first_request_funcs` and only exactly once per
application instance (which means process usually).
:internal:
"""
if self._got_first_request:
return
with self._before_request_lock:
if self._got_first_request:
return
self._got_first_request = True
for func in self.before_first_request_funcs:
func()
def make_default_options_response(self):
"""This method is called to create the default `OPTIONS` response.
This can be changed through subclassing to change the default
behavior of `OPTIONS` responses.
.. versionadded:: 0.7
"""
adapter = _request_ctx_stack.top.url_adapter
if hasattr(adapter, 'allowed_methods'):
methods = adapter.allowed_methods()
else:
# fallback for Werkzeug < 0.7
methods = []
try:
adapter.match(method='--')
except MethodNotAllowed as e:
methods = e.valid_methods
except HTTPException as e:
pass
rv = self.response_class()
rv.allow.update(methods)
return rv
def should_ignore_error(self, error):
"""This is called to figure out if an error should be ignored
or not as far as the teardown system is concerned. If this
function returns `True` then the teardown handlers will not be
passed the error.
.. versionadded:: 0.10
"""
return False
def make_response(self, rv):
"""Converts the return value from a view function to a real
response object that is an instance of :attr:`response_class`.
The following types are allowed for `rv`:
.. tabularcolumns:: |p{3.5cm}|p{9.5cm}|
======================= ===========================================
:attr:`response_class` the object is returned unchanged
:class:`str` a response object is created with the
string as body
:class:`unicode` a response object is created with the
string encoded to utf-8 as body
a WSGI function the function is called as WSGI application
and buffered as response object
:class:`tuple` A tuple in the form ``(response, status,
headers)`` where `response` is any of the
types defined here, `status` is a string
or an integer and `headers` is a list or
a dictionary with header values.
======================= ===========================================
:param rv: the return value from the view function
.. versionchanged:: 0.9
Previously a tuple was interpreted as the arguments for the
response object.
"""
status = headers = None
if isinstance(rv, tuple):
rv, status, headers = rv + (None,) * (3 - len(rv))
if rv is None:
raise ValueError('View function did not return a response')
if not isinstance(rv, self.response_class):
# When we create a response object directly, we let the constructor
# set the headers and status. We do this because there can be
# some extra logic involved when creating these objects with
# specific values (like default content type selection).
if isinstance(rv, (text_type, bytes, bytearray)):
rv = self.response_class(rv, headers=headers, status=status)
headers = status = None
else:
rv = self.response_class.force_type(rv, request.environ)
if status is not None:
if isinstance(status, string_types):
rv.status = status
else:
rv.status_code = status
if headers:
rv.headers.extend(headers)
return rv
def create_url_adapter(self, request):
"""Creates a URL adapter for the given request. The URL adapter
is created at a point where the request context is not yet set up
so the request is passed explicitly.
.. versionadded:: 0.6
.. versionchanged:: 0.9
This can now also be called without a request object when the
URL adapter is created for the application context.
"""
if request is not None:
return self.url_map.bind_to_environ(request.environ,
server_name=self.config['SERVER_NAME'])
# We need at the very least the server name to be set for this
# to work.
if self.config['SERVER_NAME'] is not None:
return self.url_map.bind(
self.config['SERVER_NAME'],
script_name=self.config['APPLICATION_ROOT'] or '/',
url_scheme=self.config['PREFERRED_URL_SCHEME'])
def inject_url_defaults(self, endpoint, values):
"""Injects the URL defaults for the given endpoint directly into
the values dictionary passed. This is used internally and
automatically called on URL building.
.. versionadded:: 0.7
"""
funcs = self.url_default_functions.get(None, ())
if '.' in endpoint:
bp = endpoint.rsplit('.', 1)[0]
funcs = chain(funcs, self.url_default_functions.get(bp, ()))
for func in funcs:
func(endpoint, values)
def handle_url_build_error(self, error, endpoint, values):
"""Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
"""
exc_type, exc_value, tb = sys.exc_info()
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
if rv is not None:
return rv
except BuildError as error:
pass
# At this point we want to reraise the exception. If the error is
# still the same one we can reraise it with the original traceback,
# otherwise we raise it from here.
if error is exc_value:
reraise(exc_type, exc_value, tb)
raise error
def preprocess_request(self):
"""Called before the actual request dispatching and will
call every function decorated with :meth:`before_request`.
If any of these functions returns a value it's handled as
if it was the return value from the view and further
request handling is stopped.
This also triggers the :meth:`url_value_preprocessor` functions before
the actual :meth:`before_request` functions are called.
"""
bp = _request_ctx_stack.top.request.blueprint
funcs = self.url_value_preprocessors.get(None, ())
if bp is not None and bp in self.url_value_preprocessors:
funcs = chain(funcs, self.url_value_preprocessors[bp])
for func in funcs:
func(request.endpoint, request.view_args)
funcs = self.before_request_funcs.get(None, ())
if bp is not None and bp in self.before_request_funcs:
funcs = chain(funcs, self.before_request_funcs[bp])
for func in funcs:
rv = func()
if rv is not None:
return rv
def process_response(self, response):
"""Can be overridden in order to modify the response object
before it's sent to the WSGI server. By default this will
call all the :meth:`after_request` decorated functions.
.. versionchanged:: 0.5
As of Flask 0.5 the functions registered for after request
execution are called in reverse order of registration.
:param response: a :attr:`response_class` object.
:return: a new response object or the same, has to be an
instance of :attr:`response_class`.
"""
ctx = _request_ctx_stack.top
bp = ctx.request.blueprint
funcs = ctx._after_request_functions
if bp is not None and bp in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
if None in self.after_request_funcs:
funcs = chain(funcs, reversed(self.after_request_funcs[None]))
for handler in funcs:
response = handler(response)
if not self.session_interface.is_null_session(ctx.session):
self.save_session(ctx.session, response)
return response
def do_teardown_request(self, exc=None):
"""Called after the actual request dispatching and will
call every function decorated with :meth:`teardown_request`. This is
not actually called by the :class:`Flask` object itself but is always
triggered when the request context is popped. That way we have a
tighter control over certain resources under testing environments.
.. versionchanged:: 0.9
Added the `exc` argument. Previously this was always using the
current exception information.
"""
if exc is None:
exc = sys.exc_info()[1]
funcs = reversed(self.teardown_request_funcs.get(None, ()))
bp = _request_ctx_stack.top.request.blueprint
if bp is not None and bp in self.teardown_request_funcs:
funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
for func in funcs:
rv = func(exc)
request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=None):
"""Called when an application context is popped. This works pretty
much the same as :meth:`do_teardown_request` but for the application
context.
.. versionadded:: 0.9
"""
if exc is None:
exc = sys.exc_info()[1]
for func in reversed(self.teardown_appcontext_funcs):
func(exc)
appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
"""Binds the application only. For as long as the application is bound
to the current context the :data:`flask.current_app` points to that
application. An application context is automatically created when a
request context is pushed if necessary.
Example usage::
with app.app_context():
...
.. versionadded:: 0.9
"""
return AppContext(self)
def request_context(self, environ):
"""Creates a :class:`~flask.ctx.RequestContext` from the given
environment and binds it to the current context. This must be used in
combination with the `with` statement because the request is only bound
to the current context for the duration of the `with` block.
Example usage::
with app.request_context(environ):
do_something_with(request)
The object returned can also be used without the `with` statement
which is useful for working in the shell. The example above is
doing exactly the same as this code::
ctx = app.request_context(environ)
ctx.push()
try:
do_something_with(request)
finally:
ctx.pop()
.. versionchanged:: 0.3
Added support for non-with statement usage and `with` statement
is now passed the ctx object.
:param environ: a WSGI environment
"""
return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
"""Creates a WSGI environment from the given values (see
:func:`werkzeug.test.EnvironBuilder` for more information, this
function accepts the same arguments).
"""
from flask.testing import make_test_environ_builder
builder = make_test_environ_builder(self, *args, **kwargs)
try:
return self.request_context(builder.get_environ())
finally:
builder.close()
def wsgi_app(self, environ, start_response):
"""The actual WSGI application. This is not implemented in
`__call__` so that middlewares can be applied without losing a
reference to the class. So instead of doing this::
app = MyMiddleware(app)
It's a better idea to do this instead::
app.wsgi_app = MyMiddleware(app.wsgi_app)
Then you still have the original application object around and
can continue to call methods on it.
.. versionchanged:: 0.7
The behavior of the before and after request callbacks was changed
under error conditions and a new callback was added that will
always execute at the end of the request, independent of whether an
error occurred or not. See :ref:`callbacks-and-errors`.
:param environ: a WSGI environment
:param start_response: a callable accepting a status code,
a list of headers and an optional
exception context to start the response
"""
ctx = self.request_context(environ)
ctx.push()
error = None
try:
try:
response = self.full_dispatch_request()
except Exception as e:
error = e
response = self.make_response(self.handle_exception(e))
return response(environ, start_response)
finally:
if self.should_ignore_error(error):
error = None
ctx.auto_pop(error)
@property
def modules(self):
from warnings import warn
warn(DeprecationWarning('Flask.modules is deprecated, use '
'Flask.blueprints instead'), stacklevel=2)
return self.blueprints
def __call__(self, environ, start_response):
"""Shortcut for :attr:`wsgi_app`."""
return self.wsgi_app(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
| apache-2.0 |
Rubisk/mcedit2 | src/mcedit2/plugins.py | 1 | 10201 | """
plugins
"""
from __future__ import absolute_import, division, print_function
from collections import defaultdict
import logging
import itertools
import os
import imp
import traceback
from mcedit2 import editortools
from mcedit2.editortools import generate
from mcedit2.util import load_ui
from mcedit2.util.settings import Settings
from mcedit2.widgets import inspector
from mceditlib.anvil import entities
log = logging.getLogger(__name__)
import sys
sys.dont_write_bytecode = True
settings = Settings().getNamespace("plugins")
enabledPluginsSetting = settings.getOption("enabled_plugins", "json", {})
autoReloadSetting = settings.getOption("auto_reload", bool, True)
# *** plugins dialog will need to:
# v get a list of (plugin display name, plugin reference, isEnabled) tuples for loaded and
# unloaded plugins.
# v enable or disable a plugin using its reference
# - reload a plugin
# - find out if a plugin was removed from the folder or failed to compile or run
# - install a new plugin using a file chooser
# - open the plugins folder(s) in Finder/Explorer
# *** on startup:
# v scan all plugins dirs for plugins
# - check if a plugin is enabled (without executing it?)
# - load plugins set to "enabled" in settings
# *** on app foreground:
# - rescan all plugins dirs
# - show new plugins to user, ask whether to load them
# - when in dev mode (??)
# - check mod times of all plugin files under each PluginRef
# - if auto-reload is on, reload plugins
# - if auto-reload is off, ??? prompt to enable autoreload?
# --- Plugin refs ---
class PluginRef(object):
_displayName = None
def __init__(self, filename, pluginsDir):
self.filename = filename
self.pluginsDir = pluginsDir
self.pluginModule = None # None indicates the plugin is not loaded
self.loadError = None
self.unloadError = None
self.timestamps = {}
def checkTimestamps(self):
"""
Record the modification time for this plugin's file and return True if it differs
from the previously recorded time.
If self.filename specifies a directory, walks the directory tree and records the mod
times of all files and directories found.
:return:
"""
timestamps = {}
filename = os.path.join(self.pluginsDir, self.filename)
if os.path.isdir(filename):
for dirname, subdirs, files in os.walk(filename):
for child in itertools.chain(subdirs, files):
pathname = os.path.join(dirname, child)
modtime = os.stat(pathname).st_mtime
timestamps[pathname] = modtime
else:
modtime = os.stat(filename).st_mtime
timestamps[filename] = modtime
changed = timestamps != self.timestamps
self.timestamps = timestamps
return changed
def findModule(self):
"""
Returns (file, pathname, description).
May raise ImportError, EnvironmentError, maybe others?
If `file` is not None, the caller is responsible for closing it. (see `imp.find_module`)
"""
basename, ext = os.path.splitext(self.filename)
return imp.find_module(basename, [self.pluginsDir])
def load(self):
if self.pluginModule:
return
basename, ext = os.path.splitext(self.filename)
io = None
try:
io, pathname, description = self.findModule()
log.info("Trying to load plugin from %s", self.filename)
global _currentPluginPathname
_currentPluginPathname = pathname
self.pluginModule = imp.load_module(basename, io, pathname, description)
registerModule(self.fullpath, self.pluginModule)
_currentPluginPathname = None
if hasattr(self.pluginModule, 'displayName'):
self._displayName = self.pluginModule.displayName
log.info("Loaded %s (%s)", self.filename, self.displayName)
except Exception as e:
self.loadError = traceback.format_exc()
log.exception("Error while loading plugin from %s: %r", self.filename, e)
else:
self.loadError = None
finally:
if io:
io.close()
def unload(self):
if self.pluginModule is None:
return
try:
unregisterModule(self.fullpath, self.pluginModule)
for k, v in sys.modules.iteritems():
if v == self.pluginModule:
sys.modules.pop(k)
break
except Exception as e:
self.unloadError = traceback.format_exc()
log.exception("Error while unloading plugin from %s: %r", self.filename, e)
else:
self.unloadError = None
self.pluginModule = None
@property
def isLoaded(self):
return self.pluginModule is not None
@property
def displayName(self):
if self._displayName:
return self._displayName
return os.path.splitext(os.path.basename(self.filename))[0]
def exists(self):
return os.path.exists(self.fullpath)
@property
def fullpath(self):
return os.path.join(self.pluginsDir, self.filename)
@property
def enabled(self):
enabledPlugins = enabledPluginsSetting.value()
return enabledPlugins.get(self.filename, True)
@enabled.setter
def enabled(self, value):
value = bool(value)
enabledPlugins = enabledPluginsSetting.value()
enabledPlugins[self.filename] = value
enabledPluginsSetting.setValue(enabledPlugins)
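# Editor's sketch (not part of the original module): intended lifecycle of a
# single PluginRef. The plugin file name and directory below are placeholders.
def _pluginref_lifecycle_sketch(pluginsDir="plugins"):
    ref = PluginRef("example_plugin.py", pluginsDir)  # hypothetical plugin file
    if ref.enabled and ref.exists() and not ref.isLoaded:
        ref.load()  # imports the module, calls its register(), records displayName
    if ref.checkTimestamps():  # True when any file under the ref has changed
        ref.unload()
        ref.load()  # simple reload cycle, as described in the dev-mode notes above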
# --- Plugin finding ---
_pluginRefs = {}
def getAllPlugins():
"""
Return all known plugins as a list of `PluginRef`s
:return: list[PluginRef]
:rtype:
"""
return list(_pluginRefs.values())
def findNewPluginsInDir(pluginsDir):
if not os.path.isdir(pluginsDir):
log.warn("Plugins dir %s not found", pluginsDir)
return
log.info("Loading plugins from %s", pluginsDir)
for filename in os.listdir(pluginsDir):
if filename not in _pluginRefs:
ref = detectPlugin(filename, pluginsDir)
if ref:
ref.checkTimestamps()
_pluginRefs[filename] = ref
def detectPlugin(filename, pluginsDir):
io = None
basename, ext = os.path.splitext(filename)
if ext in (".pyc", ".pyo"):
return None
ref = PluginRef(filename, pluginsDir)
try:
io, pathname, description = ref.findModule()
except Exception as e:
log.exception("Could not detect %s as a plugin or module: %s", filename, e)
return None
else:
return ref
finally:
if io:
io.close()
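# Editor's sketch (not part of the original module): the startup scan flow from
# the notes near the top of this section, with a placeholder directory name.
def _scan_and_load_sketch(pluginsDir="plugins"):
    findNewPluginsInDir(pluginsDir)  # adds a PluginRef for each new candidate file
    for ref in getAllPlugins():
        if ref.enabled and not ref.isLoaded:
            ref.load()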
# --- Plugin registration ---
_loadedModules = {}
_pluginClassesByPathname = defaultdict(list)
_currentPluginPathname = None
def registerModule(filename, pluginModule):
if hasattr(pluginModule, "register"):
pluginModule.register()
_loadedModules[filename] = pluginModule
pluginModule.__FOUND_FILENAME__ = filename
def unregisterModule(filename, pluginModule):
if hasattr(pluginModule, "unregister"):
pluginModule.unregister()
classes = _pluginClassesByPathname.pop(filename, None)  # None rather than KeyError when the plugin registered no classes
if classes:
for cls in classes:
_unregisterClass(cls)
_loadedModules.pop(pluginModule.__FOUND_FILENAME__)
def _registerClass(cls):
_pluginClassesByPathname[_currentPluginPathname].append(cls)
def _unregisterClass(cls):
load_ui.unregisterCustomWidget(cls)
editortools.unregisterToolClass(cls)
generate.unregisterGeneratePlugin(cls)
inspector.unregisterBlockInspectorWidget(cls)
entities.unregisterTileEntityRefClass(cls)
# --- Registration functions ---
def registerCustomWidget(cls):
"""
Register a custom QWidget class with the .ui file loader. This allows custom QWidget
classes to be used in .ui files.
>>> from PySide import QtGui
>>> @registerCustomWidget
... class MyWidget(QtGui.QWidget):
...     pass
:param cls:
:type cls: class
:return:
:rtype: class
"""
_registerClass(cls)
return load_ui.registerCustomWidget(cls)
def registerToolClass(cls):
"""
Register a tool class. Class must inherit from EditorTool.
>>> from mcedit2.editortools import EditorTool
>>> @registerToolClass
... class MyTool(EditorTool):
...     pass
:param cls:
:type cls: class
:return:
:rtype: class
"""
_registerClass(cls)
return editortools.registerToolClass(cls)
def registerGeneratePlugin(cls):
"""
Register a plugin for the Generate Tool. Class must inherit from GeneratePlugin.
>>> from mcedit2.editortools.generate import GeneratePlugin
>>> @registerGeneratePlugin
... class MyGeneratePlugin(GeneratePlugin):
...     pass
:param cls:
:type cls:
:return:
:rtype:
"""
_registerClass(cls)
return generate.registerGeneratePlugin(cls)
def registerBlockInspectorWidget(ID, cls):
"""
Register a widget with the Block Inspector to use when inspecting TileEntities
that have the given ID.
xxx make ID an attribute of cls?
>>> from PySide import QtGui
>>> class MyBarrelInspector(QtGui.QWidget):
...     pass
>>> registerBlockInspectorWidget("MyBarrel", MyBarrelInspector)
:param cls:
:type cls:
:return:
:rtype:
"""
_registerClass(cls)
return inspector.registerBlockInspectorWidget(ID, cls)
def registerTileEntityRefClass(ID, cls):
"""
Register a TileEntityRef class with the world loader to create when loading a TileEntity
with the given ID.
xxx specify world format here, too.
>>> from mceditlib.anvil.entities import PCTileEntityRefBase
>>> class MyBarrelRef(PCTileEntityRefBase):
...     pass
>>> registerTileEntityRefClass("MyBarrel", MyBarrelRef)
:param cls:
:type cls:
:return:
:rtype:
"""
# xxx this is anvil.entities - delegate to correct world format
_registerClass(cls)
return entities.registerTileEntityRefClass(ID, cls)
| bsd-3-clause |
leopittelli/Django-on-App-Engine-Example | django/utils/http.py | 29 | 9645 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlparse,
urlencode as original_urlencode)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
for k, v in query],
doseq)
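# Editor's sketch (not part of Django): expected behaviour of the quoting helpers
# above under Python 2, wrapped in a function so nothing runs at import time.
def _quoting_examples():
    # The unicode string is UTF-8 encoded before quoting; '/' stays unescaped by default.
    assert urlquote(u'Paris & Orl\xe9ans') == u'Paris%20%26%20Orl%C3%A9ans'
    # Keys and values are cast to UTF-8 byte strings; a list of pairs keeps its order.
    assert urlencode([('q', u'caf\xe9'), ('page', 2)]) == 'q=caf%C3%A9&page=2'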
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
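# Editor's sketch (not part of Django): the formatters and parser above round-trip
# the classic RFC 2616 example timestamp.
def _http_date_examples():
    ts = 784111777  # 1994-11-06 08:49:37 UTC
    assert http_date(ts) == 'Sun, 06 Nov 1994 08:49:37 GMT'
    assert cookie_date(ts) == 'Sun, 06-Nov-1994 08:49:37 GMT'
    # parse_http_date() accepts all three formats allowed by RFC 2616 section 3.3.1.
    assert parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') == ts
    assert parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT') == ts
    assert parse_http_date('Sun Nov  6 08:49:37 1994') == ts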
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
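# Editor's sketch (not part of Django): small round-trip checks for the base 36
# converters above.
def _base36_examples():
    assert int_to_base36(0) == '0'
    assert int_to_base36(35) == 'z'
    assert int_to_base36(36) == '10'
    assert base36_to_int('10') == 36
    # Round-trips for any non-negative value within the 13-digit input limit.
    assert base36_to_int(int_to_base36(2 ** 30)) == 2 ** 30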
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = s.encode('utf-8') # base64encode should only return ASCII.
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
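# Editor's sketch (not part of Django): the URL-safe base64 helpers strip and then
# restore '=' padding.
def _base64_examples():
    assert urlsafe_base64_encode(b'test') == b'dGVzdA'  # 'dGVzdA==' minus its padding
    assert urlsafe_base64_decode('dGVzdA') == b'test'   # padding re-added before decoding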
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse(url1), urlparse(url2)
try:
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
except ValueError:
return False
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if not url:
return False
# Chrome treats \ completely as /
url = url.replace('\\', '/')
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
return (not url_info.netloc or url_info.netloc == host) and \
(not url_info.scheme or url_info.scheme in ['http', 'https'])
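# Editor's sketch (not part of Django): origin and redirect-safety checks, using a
# placeholder host name.
def _url_safety_examples():
    assert same_origin('https://example.com/a', 'https://example.com/b')
    assert not same_origin('https://example.com/', 'https://other.example/')
    assert is_safe_url('/next/page', host='testserver')
    assert is_safe_url('https://testserver/next', host='testserver')
    assert not is_safe_url('https://evil.example.com/', host='testserver')
    assert not is_safe_url('http:///evil.example.com/', host='testserver')  # scheme, empty netloc
    assert not is_safe_url('')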
| mit |
sheepray/volatility | volatility/win32/modules.py | 58 | 1117 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: AAron Walters and Nick Petroni
@license: GNU General Public License 2.0
@contact: [email protected], [email protected]
@organization: Volatility Foundation
"""
#pylint: disable-msg=C0111
import volatility.win32.tasks as tasks
def lsmod(addr_space):
""" A Generator for modules """
for m in tasks.get_kdbg(addr_space).modules():
yield m
| gpl-2.0 |
mozilla-metrics/fhr-toolbox | mrjob/churn-analysis.py | 2 | 5297 | """
Analyze a historical week to understand Firefox churn.
"""
import healthreportutils
from datetime import date, datetime, timedelta
import os, shutil, csv
import sys, codecs
import traceback
import mrjob
from mrjob.job import MRJob
import tempfile
try:
import simplejson as json
except ImportError:
import json
# How many days must a user be gone to be considered "lost"?
LAG_DAYS = 49
CRITICAL_WEEKS = 9
TOTAL_DAYS = 180
main_channels = (
'nightly',
'aurora',
'beta',
'release'
)
def last_saturday(d):
"""Return the Saturday on or before the date."""
# .weekday in python starts on 0=Monday
return d - timedelta(days=(d.weekday() + 2) % 7)
def start_date(dstr):
"""
Measure Sunday-Saturday, for no particularly good reason.
"""
snapshot = datetime.strptime(dstr, "%Y-%m-%d").date()
startdate = last_saturday(snapshot)
return startdate
def date_back(start, days):
"""iter backwards from start for N days"""
date = start
for n in xrange(0, days):
yield date - timedelta(days=n)
def active_day(day):
if day is None:
return False
return any(k != "org.mozilla.crashes.crashes" for k in day)
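# Editor's sketch (not part of the original job): quick checks for the date and
# activity helpers above.
def _helper_examples():
    # 2014-01-15 was a Wednesday; the preceding Saturday is 2014-01-11.
    assert last_saturday(date(2014, 1, 15)) == date(2014, 1, 11)
    assert last_saturday(date(2014, 1, 11)) == date(2014, 1, 11)
    assert list(date_back(date(2014, 1, 11), 2)) == [date(2014, 1, 11), date(2014, 1, 10)]
    # A day counts as active only if something besides crash data was recorded.
    assert not active_day(None)
    assert not active_day({"org.mozilla.crashes.crashes": {"pending": 1}})
    assert active_day({"org.mozilla.appSessions.previous": {}})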
def logexceptions(func):
def wrapper(job, k, v):
try:
for k1, v1 in func(job, k, v):
yield (k1, v1)
except:
exc = traceback.format_exc()
print >>sys.stderr, "Script exception: ", exc
raise
return wrapper
@logexceptions
@healthreportutils.FHRMapper()
def map(job, key, payload):
pingDate = payload.get("thisPingDate", "unknown")
channel = payload.channel.split("-")[0]
if channel not in main_channels:
return
days = payload.get('data', {}).get('days', {})
def get_day(d):
dstr = d.strftime("%Y-%m-%d")
return days.get(dstr, None)
version = payload.get("geckoAppInfo", {}).get("version", "?")
sd = start_date(job.options.start_date)
# Was the user active at all in the 49 days prior to the snapshot
recent_usage = 0
for d in date_back(sd, LAG_DAYS):
day = get_day(d)
if active_day(day):
recent_usage = 1
break
# For each of the "critical" 9 weeks, record both usage days and default
# status.
week_actives = []
for weekno in xrange(0, CRITICAL_WEEKS):
week_end = sd - timedelta(days=LAG_DAYS + 7 * weekno)
active_days = 0
default_browser = None
for d in date_back(week_end, 7):
day = get_day(d)
if active_day(day):
active_days += 1
if default_browser is None:
default_browser = day.get("org.mozilla.appInfo.appinfo", {}).get("isDefaultBrowser", None)
if default_browser is None:
default_browser = "?"
week_actives.append(active_days)
week_actives.append(default_browser)
prior_usage = 0
for d in date_back(sd - timedelta(days=LAG_DAYS + 7 * CRITICAL_WEEKS),
180 - (LAG_DAYS + 7 * CRITICAL_WEEKS)):
day = get_day(d)
if active_day(day):
prior_usage = True
break
osname = payload.last.get("org.mozilla.sysinfo.sysinfo", {}).get("name", "?")
locale = payload.last.get("org.mozilla.appInfo.appinfo", {}).get("locale", "?")
geo = payload.get("geoCountry", "?")
yield ("result", [channel, osname, locale, geo, pingDate, recent_usage] + week_actives + [prior_usage])
class AggJob(MRJob):
HADOOP_INPUT_FORMAT="org.apache.hadoop.mapred.SequenceFileAsTextInputFormat"
INPUT_PROTOCOL = mrjob.protocol.RawProtocol
def run_job(self):
self.stdout = tempfile.TemporaryFile()
if self.options.start_date is None:
raise Exception("--start-date is required")
# validate the start date here
start_date(self.options.start_date)
# Do the big work
super(AggJob, self).run_job()
# Produce the separated output files
outpath = self.options.output_path
if outpath is None:
outpath = os.path.expanduser("~/fhr-churnanalysis-" + self.options.start_date + ".csv")
output(self.stdout, outpath)
def configure_options(self):
super(AggJob, self).configure_options()
self.add_passthrough_option('--output-path', help="Specify output path",
default=None)
self.add_passthrough_option('--start-date', help="Specify start date",
default=None)
def mapper(self, key, value):
return map(self, key, value)
def getresults(fd):
fd.seek(0)
for line in fd:
k, v = line.split("\t")
yield json.loads(k), json.loads(v)
def unwrap(l, v):
"""
Unwrap a value into a list. Dicts are added in their repr form.
"""
if isinstance(v, (tuple, list)):
for e in v:
unwrap(l, e)
elif isinstance(v, dict):
l.append(repr(v))
elif isinstance(v, unicode):
l.append(v.encode("utf-8"))
else:
l.append(v)
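# Editor's sketch (not part of the original job): how unwrap() flattens one mapper
# value into a flat CSV row; the field names here are made up for illustration.
def _unwrap_example():
    row = []
    unwrap(row, ["release", ("Windows_NT", u"en-US"), {"isDefault": True}])
    assert row == ["release", "Windows_NT", "en-US", "{'isDefault': True}"]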
def output(fd, path):
outfd = open(path, "w")
csvw = csv.writer(outfd)
for k, v in getresults(fd):
csvw.writerow(v)
if __name__ == '__main__':
AggJob.run()
| apache-2.0 |
mark-ignacio/phantomjs | src/breakpad/src/tools/gyp/tools/pretty_sln.py | 137 | 4977 | #!/usr/bin/python2.5
# Copyright 2009 Google Inc.
# All Rights Reserved.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
'}"\) = "(.*)", "(.*)", "(.*)"$'))
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
# For each dependencies in this project
new_dep_array = []
for dep in dependencies[project]:
# Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, dep_list) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
# check if we have exactly 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
if __name__ == '__main__':
main()
| bsd-3-clause |
maxrothman/aws-alfred-workflow | venv/lib/python2.7/site-packages/jmespath/functions.py | 21 | 13008 | import math
import json
import weakref
from jmespath import exceptions
from jmespath.compat import string_type as STRING_TYPE
from jmespath.compat import get_methods
# python types -> jmespath types
TYPES_MAP = {
'bool': 'boolean',
'list': 'array',
'dict': 'object',
'NoneType': 'null',
'unicode': 'string',
'str': 'string',
'float': 'number',
'int': 'number',
'OrderedDict': 'object',
'_Projection': 'array',
'_Expression': 'expref',
}
# jmespath types -> python types
REVERSE_TYPES_MAP = {
'boolean': ('bool',),
'array': ('list', '_Projection'),
'object': ('dict', 'OrderedDict',),
'null': ('None',),
'string': ('unicode', 'str'),
'number': ('float', 'int'),
'expref': ('_Expression',),
}
def populate_function_table(cls):
func_table = cls.FUNCTION_TABLE
for name, method in get_methods(cls):
signature = getattr(method, 'signature', None)
if signature is not None:
func_table[name[6:]] = {"function": method,
"signature": signature}
return cls
def builtin_function(*arguments):
def _record_arity(func):
func.signature = arguments
return func
return _record_arity
@populate_function_table
class RuntimeFunctions(object):
# The built in functions are automatically populated in the FUNCTION_TABLE
# using the @builtin_function decorator on methods defined in this class.
FUNCTION_TABLE = {
}
def __init__(self):
self._interpreter = None
@property
def interpreter(self):
if self._interpreter is None:
return None
else:
return self._interpreter()
@interpreter.setter
def interpreter(self, value):
# A weakref is used because we have
# a cyclic reference and we want to allow
# for the memory to be properly freed when
# the objects are no longer needed.
self._interpreter = weakref.ref(value)
def call_function(self, function_name, resolved_args):
try:
spec = self.FUNCTION_TABLE[function_name]
except KeyError:
raise exceptions.UnknownFunctionError(
"Unknown function: %s()" % function_name)
function = spec['function']
signature = spec['signature']
self._validate_arguments(resolved_args, signature, function_name)
return function(self, *resolved_args)
def _validate_arguments(self, args, signature, function_name):
if signature and signature[-1].get('variadic'):
if len(args) < len(signature):
raise exceptions.VariadictArityError(
len(signature), len(args), function_name)
elif len(args) != len(signature):
raise exceptions.ArityError(
len(signature), len(args), function_name)
return self._type_check(args, signature, function_name)
def _type_check(self, actual, signature, function_name):
for i in range(len(signature)):
allowed_types = signature[i]['types']
if allowed_types:
self._type_check_single(actual[i], allowed_types,
function_name)
def _type_check_single(self, current, types, function_name):
# Type checking involves checking the top level type,
# and in the case of arrays, potentially checking the types
# of each element.
allowed_types, allowed_subtypes = self._get_allowed_pytypes(types)
# We're not using isinstance() on purpose.
# The type model for jmespath does not map
# 1-1 with python types (booleans are considered
# integers in python for example).
actual_typename = type(current).__name__
if actual_typename not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, current,
self._convert_to_jmespath_type(actual_typename), types)
# If we're dealing with a list type, we can have
# additional restrictions on the type of the list
# elements (for example a function can require a
# list of numbers or a list of strings).
# Arrays are the only types that can have subtypes.
if allowed_subtypes:
self._subtype_check(current, allowed_subtypes,
types, function_name)
def _get_allowed_pytypes(self, types):
allowed_types = []
allowed_subtypes = []
for t in types:
type_ = t.split('-', 1)
if len(type_) == 2:
type_, subtype = type_
allowed_subtypes.append(REVERSE_TYPES_MAP[subtype])
else:
type_ = type_[0]
allowed_types.extend(REVERSE_TYPES_MAP[type_])
return allowed_types, allowed_subtypes
def _subtype_check(self, current, allowed_subtypes, types, function_name):
if len(allowed_subtypes) == 1:
# The easy case, we know up front what type
# we need to validate.
allowed_subtypes = allowed_subtypes[0]
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed_subtypes:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
elif len(allowed_subtypes) > 1 and current:
# Dynamic type validation. Based on the first
# type we see, we validate that the remaining types
# match.
first = type(current[0]).__name__
for subtypes in allowed_subtypes:
if first in subtypes:
allowed = subtypes
break
else:
raise exceptions.JMESPathTypeError(
function_name, current[0], first, types)
for element in current:
actual_typename = type(element).__name__
if actual_typename not in allowed:
raise exceptions.JMESPathTypeError(
function_name, element, actual_typename, types)
@builtin_function({'types': ['number']})
def _func_abs(self, arg):
return abs(arg)
@builtin_function({'types': ['array-number']})
def _func_avg(self, arg):
return sum(arg) / float(len(arg))
@builtin_function({'types': [], 'variadic': True})
def _func_not_null(self, *arguments):
for argument in arguments:
if argument is not None:
return argument
@builtin_function({'types': []})
def _func_to_array(self, arg):
if isinstance(arg, list):
return arg
else:
return [arg]
@builtin_function({'types': []})
def _func_to_string(self, arg):
if isinstance(arg, STRING_TYPE):
return arg
else:
return json.dumps(arg, separators=(',', ':'),
default=str)
@builtin_function({'types': []})
def _func_to_number(self, arg):
if isinstance(arg, (list, dict, bool)):
return None
elif arg is None:
return None
elif isinstance(arg, (int, float)):
return arg
else:
try:
if '.' in arg:
return float(arg)
else:
return int(arg)
except ValueError:
return None
@builtin_function({'types': ['array', 'string']}, {'types': []})
def _func_contains(self, subject, search):
return search in subject
@builtin_function({'types': ['string', 'array', 'object']})
def _func_length(self, arg):
return len(arg)
@builtin_function({'types': ['string']}, {'types': ['string']})
def _func_ends_with(self, search, suffix):
return search.endswith(suffix)
@builtin_function({'types': ['string']}, {'types': ['string']})
def _func_starts_with(self, search, suffix):
return search.startswith(suffix)
@builtin_function({'types': ['array', 'string']})
def _func_reverse(self, arg):
if isinstance(arg, STRING_TYPE):
return arg[::-1]
else:
return list(reversed(arg))
@builtin_function({"types": ['number']})
def _func_ceil(self, arg):
return math.ceil(arg)
@builtin_function({"types": ['number']})
def _func_floor(self, arg):
return math.floor(arg)
@builtin_function({"types": ['string']}, {"types": ['array-string']})
def _func_join(self, separator, array):
return separator.join(array)
@builtin_function({'types': ['expref']}, {'types': ['array']})
def _func_map(self, expref, arg):
result = []
for element in arg:
result.append(self.interpreter.visit(expref.expression, element))
return result
@builtin_function({"types": ['array-number', 'array-string']})
def _func_max(self, arg):
if arg:
return max(arg)
else:
return None
@builtin_function({"types": ["object"], "variadic": True})
def _func_merge(self, *arguments):
merged = {}
for arg in arguments:
merged.update(arg)
return merged
@builtin_function({"types": ['array-number', 'array-string']})
def _func_min(self, arg):
if arg:
return min(arg)
else:
return None
@builtin_function({"types": ['array-string', 'array-number']})
def _func_sort(self, arg):
return list(sorted(arg))
@builtin_function({"types": ['array-number']})
def _func_sum(self, arg):
return sum(arg)
@builtin_function({"types": ['object']})
def _func_keys(self, arg):
# To be consistent with .values()
# should we also return the indices of a list?
return list(arg.keys())
@builtin_function({"types": ['object']})
def _func_values(self, arg):
return list(arg.values())
@builtin_function({'types': []})
def _func_type(self, arg):
if isinstance(arg, STRING_TYPE):
return "string"
elif isinstance(arg, bool):
return "boolean"
elif isinstance(arg, list):
return "array"
elif isinstance(arg, dict):
return "object"
elif isinstance(arg, (float, int)):
return "number"
elif arg is None:
return "null"
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_sort_by(self, array, expref):
if not array:
return array
# sort_by allows for the expref to be either a number or
# a string, so we have some special logic to handle this.
# We evaluate the first array element and verify that it's
# either a string or a number. We then create a key function
# that validates that type, which requires that remaining array
# elements resolve to the same type as the first element.
required_type = self._convert_to_jmespath_type(
type(self.interpreter.visit(expref.expression, array[0])).__name__)
if required_type not in ['number', 'string']:
raise exceptions.JMESPathTypeError(
'sort_by', array[0], required_type, ['string', 'number'])
keyfunc = self._create_key_func(expref.expression,
[required_type],
'sort_by')
return list(sorted(array, key=keyfunc))
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_min_by(self, array, expref):
keyfunc = self._create_key_func(expref.expression,
['number', 'string'],
'min_by')
return min(array, key=keyfunc)
@builtin_function({'types': ['array']}, {'types': ['expref']})
def _func_max_by(self, array, expref):
keyfunc = self._create_key_func(expref.expression,
['number', 'string'],
'max_by')  # name used in type-error messages; 'min_by' here was a copy-paste slip
return max(array, key=keyfunc)
def _create_key_func(self, expr_node, allowed_types, function_name):
interpreter = self.interpreter
def keyfunc(x):
result = interpreter.visit(expr_node, x)
actual_typename = type(result).__name__
jmespath_type = self._convert_to_jmespath_type(actual_typename)
# allowed_types is in term of jmespath types, not python types.
if jmespath_type not in allowed_types:
raise exceptions.JMESPathTypeError(
function_name, result, jmespath_type, allowed_types)
return result
return keyfunc
def _convert_to_jmespath_type(self, pyobject):
return TYPES_MAP.get(pyobject, 'unknown')
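# Editor's sketch (not part of jmespath): exercising the dispatch table directly.
# call_function() looks the name up in FUNCTION_TABLE, checks arity and argument
# types against the recorded signature, then invokes the stored method.
def _dispatch_example():
    funcs = RuntimeFunctions()
    assert funcs.call_function('abs', [-3]) == 3
    assert funcs.call_function('ceil', [1.2]) == 2
    assert funcs.call_function('join', ['-', ['a', 'b']]) == 'a-b'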
| mit |
davidwaroquiers/pymatgen | pymatgen/analysis/tests/test_structure_matcher.py | 5 | 47994 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import itertools
import json
import os
import unittest
import numpy as np
from monty.json import MontyDecoder
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.defects.core import Interstitial, Substitution, Vacancy
from pymatgen.analysis.structure_matcher import (
ElementComparator,
FrameworkComparator,
OccupancyComparator,
OrderDisorderElementComparator,
PointDefectComparator,
StructureMatcher,
)
from pymatgen.core import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import find_in_coord_list_pbc
from pymatgen.util.testing import PymatgenTest
class StructureMatcherTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json"), "r") as fp:
entries = json.load(fp, cls=MontyDecoder)
self.struct_list = [e.structure for e in entries]
self.oxi_structs = [
self.get_structure("Li2O"),
Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.Li2O")),
]
def test_ignore_species(self):
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "LiFePO4.cif"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR"))
m = StructureMatcher(ignored_species=["Li"], primitive_cell=False, attempt_supercell=True)
self.assertTrue(m.fit(s1, s2))
self.assertTrue(m.fit_anonymous(s1, s2))
groups = m.group_structures([s1, s2])
self.assertEqual(len(groups), 1)
s2.make_supercell((2, 1, 1))
ss1 = m.get_s2_like_s1(s2, s1, include_ignored_species=True)
self.assertAlmostEqual(ss1.lattice.a, 20.820740000000001)
self.assertEqual(ss1.composition.reduced_formula, "LiFePO4")
self.assertEqual(
{k.symbol: v.symbol for k, v in m.get_best_electronegativity_anonymous_mapping(s1, s2).items()},
{"Fe": "Fe", "P": "P", "O": "O"},
)
def test_get_supercell_size(self):
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.9)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu", "Ag"], [[0] * 3] * 5)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
sm = StructureMatcher(supercell_size="volume")
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="num_sites")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size="Ag")
self.assertEqual(sm._get_supercell_size(s1, s2), (2, False))
self.assertEqual(sm._get_supercell_size(s2, s1), (2, True))
sm = StructureMatcher(supercell_size=["Ag", "Cu"])
self.assertEqual(sm._get_supercell_size(s1, s2), (1, True))
self.assertEqual(sm._get_supercell_size(s2, s1), (1, True))
sm = StructureMatcher(supercell_size="wfieoh")
self.assertRaises(ValueError, sm._get_supercell_size, s1, s2)
def test_cmp_fstruct(self):
sm = StructureMatcher()
s1 = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
s2 = np.array([[0.11, 0.22, 0.33]])
frac_tol = np.array([0.02, 0.03, 0.04])
mask = np.array([[False, False]])
mask2 = np.array([[True, False]])
self.assertRaises(ValueError, sm._cmp_fstruct, s2, s1, frac_tol, mask.T)
self.assertRaises(ValueError, sm._cmp_fstruct, s1, s2, frac_tol, mask.T)
self.assertTrue(sm._cmp_fstruct(s1, s2, frac_tol, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol / 2, mask))
self.assertFalse(sm._cmp_fstruct(s1, s2, frac_tol, mask2))
def test_cart_dists(self):
sm = StructureMatcher()
l = Lattice.orthorhombic(1, 2, 3)
s1 = np.array([[0.13, 0.25, 0.37], [0.1, 0.2, 0.3]])
s2 = np.array([[0.11, 0.22, 0.33]])
s3 = np.array([[0.1, 0.2, 0.3], [0.11, 0.2, 0.3]])
s4 = np.array([[0.1, 0.2, 0.3], [0.1, 0.6, 0.7]])
mask = np.array([[False, False]])
mask2 = np.array([[False, True]])
mask3 = np.array([[False, False], [False, False]])
mask4 = np.array([[False, True], [False, True]])
n1 = (len(s1) / l.volume) ** (1 / 3)
n2 = (len(s2) / l.volume) ** (1 / 3)
self.assertRaises(ValueError, sm._cart_dists, s2, s1, l, mask.T, n2)
self.assertRaises(ValueError, sm._cart_dists, s1, s2, l, mask.T, n1)
d, ft, s = sm._cart_dists(s1, s2, l, mask, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [-0.01, -0.02, -0.03]))
self.assertTrue(np.allclose(s, [1]))
# check that masking best value works
d, ft, s = sm._cart_dists(s1, s2, l, mask2, n1)
self.assertTrue(np.allclose(d, [0]))
self.assertTrue(np.allclose(ft, [0.02, 0.03, 0.04]))
self.assertTrue(np.allclose(s, [0]))
# check that averaging of translation is done properly
d, ft, s = sm._cart_dists(s1, s3, l, mask3, n1)
self.assertTrue(np.allclose(d, [0.08093341] * 2))
self.assertTrue(np.allclose(ft, [0.01, 0.025, 0.035]))
self.assertTrue(np.allclose(s, [1, 0]))
# check distances are large when mask allows no 'real' mapping
d, ft, s = sm._cart_dists(s1, s4, l, mask4, n1)
self.assertTrue(np.min(d) > 1e8)
self.assertTrue(np.min(ft) > 1e8)
def test_get_mask(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
result = [
[True, False, True, False],
[True, False, True, False],
[True, True, False, True],
]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertEqual(inds, [2])
# test supercell with match
result = [
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s1, s2, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 2)
self.assertTrue(np.allclose(inds, np.array([4])))
# test supercell without match
result = [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test s2_supercell
result = [
[1, 1, 1],
[1, 1, 1],
[0, 0, 1],
[0, 0, 1],
[1, 1, 0],
[1, 1, 0],
[0, 0, 1],
[0, 0, 1],
]
m, inds, i = sm._get_mask(s2, s1, 2, False)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 0)
self.assertTrue(np.allclose(inds, np.array([])))
# test for multiple translation indices
s1 = Structure(l, ["Cu", "Ag", "Cu", "Ag", "Ag"], [[0] * 3] * 5)
s2 = Structure(l, ["Ag", "Cu", "Ag"], [[0] * 3] * 3)
result = [[1, 0, 1, 0, 0], [0, 1, 0, 1, 1], [1, 0, 1, 0, 0]]
m, inds, i = sm._get_mask(s1, s2, 1, True)
self.assertTrue(np.all(m == result))
self.assertTrue(i == 1)
self.assertTrue(np.allclose(inds, [0, 2]))
def test_get_supercells(self):
sm = StructureMatcher(comparator=ElementComparator())
l = Lattice.cubic(1)
l2 = Lattice.cubic(0.5)
s1 = Structure(l, ["Mg", "Cu", "Ag", "Cu"], [[0] * 3] * 4)
s2 = Structure(l2, ["Cu", "Cu", "Ag"], [[0] * 3] * 3)
scs = list(sm._get_supercells(s1, s2, 8, False))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 4)
self.assertEqual(len(x[1]), 24)
self.assertEqual(len(scs), 48)
scs = list(sm._get_supercells(s2, s1, 8, True))
for x in scs:
self.assertAlmostEqual(abs(np.linalg.det(x[3])), 8)
self.assertEqual(len(x[0]), 24)
self.assertEqual(len(x[1]), 4)
self.assertEqual(len(scs), 48)
def test_fit(self):
"""
Take two known matched structures
1) Ensure match
2) Ensure match after translation and rotations
3) Ensure no-match after large site translation
4) Ensure match after site shuffling
"""
sm = StructureMatcher()
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test rotational/translational invariance
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 30, False, np.array([0.4, 0.7, 0.9]))
self.struct_list[1].apply_operation(op)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test failure under large atomic translation
self.struct_list[1].translate_sites([0], [0.4, 0.4, 0.2], frac_coords=True)
self.assertFalse(sm.fit(self.struct_list[0], self.struct_list[1]))
self.struct_list[1].translate_sites([0], [-0.4, -0.4, -0.2], frac_coords=True)
# random.shuffle(editor._sites)
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
# Test FrameworkComparator
sm2 = StructureMatcher(comparator=FrameworkComparator())
lfp = self.get_structure("LiFePO4")
nfp = self.get_structure("NaFePO4")
self.assertTrue(sm2.fit(lfp, nfp))
self.assertFalse(sm.fit(lfp, nfp))
# Test anonymous fit.
self.assertEqual(sm.fit_anonymous(lfp, nfp), True)
self.assertAlmostEqual(sm.get_rms_anonymous(lfp, nfp)[0], 0.060895871160262717)
# Test partial occupancies.
s1 = Structure(
Lattice.cubic(3),
[{"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
s2 = Structure(
Lattice.cubic(3),
[{"Fe": 0.25}, {"Fe": 0.5}, {"Fe": 0.5}, {"Fe": 0.75}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertFalse(sm.fit(s1, s2))
self.assertFalse(sm.fit(s2, s1))
s2 = Structure(
Lattice.cubic(3),
[{"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}, {"Mn": 0.5}],
[[0, 0, 0], [0.25, 0.25, 0.25], [0.5, 0.5, 0.5], [0.75, 0.75, 0.75]],
)
self.assertEqual(sm.fit_anonymous(s1, s2), True)
self.assertAlmostEqual(sm.get_rms_anonymous(s1, s2)[0], 0)
# test symmetric
sm_coarse = sm = StructureMatcher(
comparator=ElementComparator(),
ltol=0.6,
stol=0.6,
angle_tol=6,
)
s1 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s1.vasp")
s2 = Structure.from_file(PymatgenTest.TEST_FILES_DIR / "fit_symm_s2.vasp")
self.assertEqual(sm_coarse.fit(s1, s2), True)
self.assertEqual(sm_coarse.fit(s2, s1), False)
self.assertEqual(sm_coarse.fit(s1, s2, symmetric=True), False)
self.assertEqual(sm_coarse.fit(s2, s1, symmetric=True), False)
def test_oxi(self):
"""Test oxidation state removal matching"""
sm = StructureMatcher()
self.assertFalse(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
sm = StructureMatcher(comparator=ElementComparator())
self.assertTrue(sm.fit(self.oxi_structs[0], self.oxi_structs[1]))
def test_primitive(self):
"""Test primitive cell reduction"""
sm = StructureMatcher(primitive_cell=True)
self.struct_list[1].make_supercell([[2, 0, 0], [0, 3, 0], [0, 0, 1]])
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
def test_class(self):
# Tests entire class as single working unit
sm = StructureMatcher()
# Test group_structures and find_indices
out = sm.group_structures(self.struct_list)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
self.assertEqual(sum(map(len, out)), len(self.struct_list))
for s in self.struct_list[::2]:
s.replace_species({"Ti": "Zr", "O": "Ti"})
out = sm.group_structures(self.struct_list, anonymous=True)
self.assertEqual(list(map(len, out)), [4, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1])
def test_mix(self):
structures = [
self.get_structure("Li2O"),
self.get_structure("Li2O2"),
self.get_structure("LiFePO4"),
]
for fname in ["POSCAR.Li2O", "POSCAR.LiFePO4"]:
structures.append(Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, fname)))
sm = StructureMatcher(comparator=ElementComparator())
groups = sm.group_structures(structures)
for g in groups:
formula = g[0].composition.reduced_formula
if formula in ["Li2O", "LiFePO4"]:
self.assertEqual(len(g), 2)
else:
self.assertEqual(len(g), 1)
def test_left_handed_lattice(self):
"""Ensure Left handed lattices are accepted"""
sm = StructureMatcher()
s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li3GaPCO7.json"))
self.assertTrue(sm.fit(s, s))
def test_as_dict_and_from_dict(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.2,
angle_tol=2,
primitive_cell=False,
scale=False,
comparator=FrameworkComparator(),
)
d = sm.as_dict()
sm2 = StructureMatcher.from_dict(d)
self.assertEqual(sm2.as_dict(), d)
def test_no_scaling(self):
sm = StructureMatcher(ltol=0.1, stol=0.1, angle_tol=2, scale=False, comparator=ElementComparator())
self.assertTrue(sm.fit(self.struct_list[0], self.struct_list[1]))
self.assertTrue(sm.get_rms_dist(self.struct_list[0], self.struct_list[1])[0] < 0.0008)
def test_supercell_fit(self):
sm = StructureMatcher(attempt_supercell=False)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Al3F9_distorted.json"))
self.assertFalse(sm.fit(s1, s2))
sm = StructureMatcher(attempt_supercell=True)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
def test_get_lattices(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l1 = Lattice.from_parameters(1, 2.1, 1.9, 90, 89, 91)
l2 = Lattice.from_parameters(1.1, 2, 2, 89, 91, 90)
s1 = Structure(l1, [], [])
s2 = Structure(l2, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s2.lattice))
self.assertEqual(len(lattices), 16)
l3 = Lattice.from_parameters(1.1, 2, 20, 89, 91, 90)
s3 = Structure(l3, [], [])
lattices = list(sm._get_lattices(s=s1, target_lattice=s3.lattice))
self.assertEqual(len(lattices), 0)
def test_find_match1(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [0.7, 0.5, 0.375]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=True, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
fc = s2.frac_coords + match[3]
fc -= np.round(fc)
self.assertAlmostEqual(np.sum(fc), 0.9)
self.assertAlmostEqual(np.sum(fc[:, :2]), 0.1)
cart_dist = np.sum(match[1] * (l.volume / 3) ** (1 / 3))
self.assertAlmostEqual(cart_dist, 0.15)
def test_find_match2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=True,
scale=True,
attempt_supercell=False,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si"], [[0, 0, 0.1], [0, 0, 0.2]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [0, 0.1, -0.95]])
s1, s2, fu, s1_supercell = sm._preprocess(s1, s2, False)
match = sm._strict_match(s1, s2, fu, s1_supercell=False, use_rms=True, break_on_match=False)
scale_matrix = match[2]
s2.make_supercell(scale_matrix)
s2.translate_sites(range(len(s2)), match[3])
self.assertAlmostEqual(np.sum(s2.frac_coords) % 1, 0.3)
self.assertAlmostEqual(np.sum(s2.frac_coords[:, :2]) % 1, 0)
def test_supercell_subsets(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="volume",
)
sm_no_s = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [0, 2, 1, 3, 4, 5]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test when s1 is exact supercell of s2
result = sm.get_s2_like_s1(s1, s2)
for a, b in zip(s1, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2))
self.assertTrue(sm.fit(s2, s1))
self.assertTrue(sm_no_s.fit(s1, s2))
self.assertTrue(sm_no_s.fit(s2, s1))
rms = (0.048604032430991401, 0.059527539448807391)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, s1), rms))
# test when the supercell is a subset of s2
subset_supercell = s1.copy()
del subset_supercell[0]
result = sm.get_s2_like_s1(subset_supercell, s2)
self.assertEqual(len(result), 6)
for a, b in zip(subset_supercell, result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(subset_supercell, s2))
self.assertTrue(sm.fit(s2, subset_supercell))
self.assertFalse(sm_no_s.fit(subset_supercell, s2))
self.assertFalse(sm_no_s.fit(s2, subset_supercell))
rms = (0.053243049896333279, 0.059527539448807336)
self.assertTrue(np.allclose(sm.get_rms_dist(subset_supercell, s2), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2, subset_supercell), rms))
# test when s2 (once made a supercell) is a subset of s1
s2_missing_site = s2.copy()
del s2_missing_site[1]
result = sm.get_s2_like_s1(s1, s2_missing_site)
for a, b in zip((s1[i] for i in (0, 2, 4, 5)), result):
self.assertTrue(a.distance(b) < 0.08)
self.assertEqual(a.species, b.species)
self.assertTrue(sm.fit(s1, s2_missing_site))
self.assertTrue(sm.fit(s2_missing_site, s1))
self.assertFalse(sm_no_s.fit(s1, s2_missing_site))
self.assertFalse(sm_no_s.fit(s2_missing_site, s1))
rms = (0.029763769724403633, 0.029763769724403987)
self.assertTrue(np.allclose(sm.get_rms_dist(s1, s2_missing_site), rms))
self.assertTrue(np.allclose(sm.get_rms_dist(s2_missing_site, s1), rms))
def test_get_s2_large_s2(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=False,
attempt_supercell=True,
allow_subset=False,
supercell_size="volume",
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
l2 = Lattice.orthorhombic(1.01, 2.01, 3.01)
s2 = Structure(l2, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
s2.make_supercell([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
result = sm.get_s2_like_s1(s1, s2)
for x, y in zip(s1, result):
self.assertLess(x.distance(y), 0.08)
def test_get_mapping(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Ag", "Si", "Si"], [[0.7, 0.4, 0.5], [0, 0, 0.1], [0, 0, 0.2]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, -0.95], [0, 0.1, 0], [-0.7, 0.5, 0.375]])
shuffle = [2, 0, 1, 3, 5, 4]
s1 = Structure.from_sites([s1[i] for i in shuffle])
# test the mapping
s2.make_supercell([2, 1, 1])
# equal sizes
for i, x in enumerate(sm.get_mapping(s1, s2)):
self.assertEqual(s1[x].species, s2[i].species)
del s1[0]
# s1 is subset of s2
for i, x in enumerate(sm.get_mapping(s2, s1)):
self.assertEqual(s1[i].species, s2[x].species)
# s2 is smaller than s1
del s2[0]
del s2[1]
self.assertRaises(ValueError, sm.get_mapping, s2, s1)
def test_get_supercell_matrix(self):
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
)
l = Lattice.orthorhombic(1, 2, 3)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([2, 1, 1])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-2, 0, 0], [0, 1, 0], [0, 0, 1]]).all())
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s1.make_supercell([[1, -1, 0], [0, 0, -1], [0, 1, 0]])
s2 = Structure(l, ["Si", "Si", "Ag"], [[0, 0.1, 0], [0, 0.1, -0.95], [-0.7, 0.5, 0.375]])
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
# test when the supercell is a subset
sm = StructureMatcher(
ltol=0.1,
stol=0.3,
angle_tol=2,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
)
del s1[0]
result = sm.get_supercell_matrix(s1, s2)
self.assertTrue((result == [[-1, -1, 0], [0, 0, -1], [0, 1, 0]]).all())
def test_subset(self):
sm = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=False,
allow_subset=True,
)
l = Lattice.orthorhombic(10, 20, 30)
s1 = Structure(l, ["Si", "Si", "Ag"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
result = sm.get_s2_like_s1(s1, s2)
mindists = np.min(s1.lattice.get_all_distances(s1.frac_coords, result.frac_coords), axis=0)
self.assertLess(np.max(mindists), 1e-6)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0, 0, 0.1])), 1)
self.assertEqual(len(find_in_coord_list_pbc(result.frac_coords, [0.7, 0.4, 0.5])), 1)
# test with not enough sites in s1
# test with fewer species in s2
s1 = Structure(l, ["Si", "Ag", "Cl"], [[0, 0, 0.1], [0, 0, 0.2], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Si"], [[0, 0.1, 0], [-0.7, 0.5, 0.4]])
self.assertEqual(sm.get_s2_like_s1(s1, s2), None)
def test_out_of_cell_s2_like_s1(self):
l = Lattice.cubic(5)
s1 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, -0.02], [0, 0, 0.001], [0.7, 0.4, 0.5]])
s2 = Structure(l, ["Si", "Ag", "Si"], [[0, 0, 0.98], [0, 0, 0.99], [0.7, 0.4, 0.5]])
new_s2 = StructureMatcher(primitive_cell=False).get_s2_like_s1(s1, s2)
dists = np.sum((s1.cart_coords - new_s2.cart_coords) ** 2, axis=-1) ** 0.5
self.assertLess(np.max(dists), 0.1)
def test_disordered_primitive_to_ordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.75, 0.5, 0.5]]
prim = Structure(lp, [{"Na": 0.5}, {"Cl": 0.5}], pcoords)
supercell = Structure(ls, ["Na", "Cl"], scoords)
supercell.make_supercell([[-1, 1, 0], [0, 1, 1], [1, 0, 0]])
self.assertFalse(sm_sites.fit(prim, supercell))
self.assertTrue(sm_atoms.fit(prim, supercell))
self.assertRaises(ValueError, sm_atoms.get_s2_like_s1, prim, supercell)
self.assertEqual(len(sm_atoms.get_s2_like_s1(supercell, prim)), 4)
def test_ordered_primitive_to_disordered_supercell(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_atoms",
comparator=OrderDisorderElementComparator(),
)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
ls = Lattice.orthorhombic(20, 20, 30)
scoords = [[0, 0, 0], [0.5, 0, 0], [0.25, 0.5, 0.5], [0.75, 0.5, 0.5]]
s1 = Structure(lp, ["Na", "Cl"], pcoords)
s2 = Structure(ls, [{"Na": 0.5}, {"Na": 0.5}, {"Cl": 0.5}, {"Cl": 0.5}], scoords)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_atoms.fit(s1, s2))
def test_disordered_to_disordered(self):
sm_atoms = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=False,
comparator=OrderDisorderElementComparator(),
)
lp = Lattice.orthorhombic(10, 20, 30)
coords = [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Cl": 0.5}], coords)
s2 = Structure(lp, [{"Na": 0.5, "Cl": 0.5}, {"Na": 0.5, "Br": 0.5}], coords)
self.assertFalse(sm_atoms.fit(s1, s2))
def test_occupancy_comparator(self):
lp = Lattice.orthorhombic(10, 20, 30)
pcoords = [[0, 0, 0], [0.5, 0.5, 0.5]]
s1 = Structure(lp, [{"Na": 0.6, "K": 0.4}, "Cl"], pcoords)
s2 = Structure(lp, [{"Xa": 0.4, "Xb": 0.6}, "Cl"], pcoords)
s3 = Structure(lp, [{"Xa": 0.5, "Xb": 0.5}, "Cl"], pcoords)
sm_sites = StructureMatcher(
ltol=0.2,
stol=0.3,
angle_tol=5,
primitive_cell=False,
scale=True,
attempt_supercell=True,
allow_subset=True,
supercell_size="num_sites",
comparator=OccupancyComparator(),
)
self.assertTrue(sm_sites.fit(s1, s2))
self.assertFalse(sm_sites.fit(s1, s3))
def test_electronegativity(self):
sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
s1 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PAsO4S4.json"))
s2 = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "Na2Fe2PNO4Se4.json"))
self.assertEqual(
sm.get_best_electronegativity_anonymous_mapping(s1, s2),
{
Element("S"): Element("Se"),
Element("As"): Element("N"),
Element("Fe"): Element("Fe"),
Element("Na"): Element("Na"),
Element("P"): Element("P"),
Element("O"): Element("O"),
},
)
self.assertEqual(len(sm.get_all_anonymous_mappings(s1, s2)), 2)
# test include_dist
dists = {Element("N"): 0, Element("P"): 0.0010725064}
for mapping, d in sm.get_all_anonymous_mappings(s1, s2, include_dist=True):
self.assertAlmostEqual(dists[mapping[Element("As")]], d)
def test_rms_vs_minimax(self):
# This tests that structures with adjusted RMS less than stol, but minimax
# greater than stol are treated properly
# stol=0.3 gives exactly an ftol of 0.1 on the c axis
sm = StructureMatcher(ltol=0.2, stol=0.301, angle_tol=1, primitive_cell=False)
l = Lattice.orthorhombic(1, 2, 12)
sp = ["Si", "Si", "Al"]
s1 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.5]])
s2 = Structure(l, sp, [[0.5, 0, 0], [0, 0, 0], [0, 0, 0.6]])
self.assertArrayAlmostEqual(sm.get_rms_dist(s1, s2), (0.32 ** 0.5 / 2, 0.4))
self.assertEqual(sm.fit(s1, s2), False)
self.assertEqual(sm.fit_anonymous(s1, s2), False)
self.assertEqual(sm.get_mapping(s1, s2), None)
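# --- Illustrative sketch (not part of the original test suite) --------------
# The tests above exercise the typical StructureMatcher workflow. Assuming the
# same pymatgen imports used elsewhere in this file, the basic calls look like:
#
#     sm = StructureMatcher(ltol=0.2, stol=0.3, angle_tol=5)
#     sm.fit(s1, s2)              # bool: do the two structures match?
#     sm.get_rms_dist(s1, s2)     # (normalized rms, max) displacement tuple
#     sm.get_s2_like_s1(s1, s2)   # s2 mapped onto the s1 setting, or None
# -----------------------------------------------------------------------------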
class PointDefectComparatorTest(PymatgenTest):
def test_defect_matching(self):
# SETUP DEFECTS FOR TESTING
# symmorphic defect test set
s_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CsSnI3.cif")) # tetragonal CsSnI3
identical_Cs_vacs = [Vacancy(s_struc, s_struc[0]), Vacancy(s_struc, s_struc[1])]
identical_I_vacs_sublattice1 = [
Vacancy(s_struc, s_struc[4]),
Vacancy(s_struc, s_struc[5]),
Vacancy(s_struc, s_struc[8]),
Vacancy(s_struc, s_struc[9]),
] # in plane halides
identical_I_vacs_sublattice2 = [
Vacancy(s_struc, s_struc[6]),
Vacancy(s_struc, s_struc[7]),
] # out of plane halides
pdc = PointDefectComparator()
# NOW TEST DEFECTS
# test vacancy matching
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[0])) # trivial vacancy test
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], identical_Cs_vacs[1])) # vacancies on same sublattice
for i, j in itertools.combinations(range(4), 2):
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice1[i], identical_I_vacs_sublattice1[j]))
self.assertTrue(pdc.are_equal(identical_I_vacs_sublattice2[0], identical_I_vacs_sublattice2[1]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# both vacancies, but different specie types
identical_I_vacs_sublattice1[0],
)
)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same specie type, different sublattice
identical_I_vacs_sublattice2[0],
)
)
# test substitutional matching
sub_Cs_on_I_sublattice1_set1 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[0].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice1_set2 = PeriodicSite(
"Cs", identical_I_vacs_sublattice1[1].site.frac_coords, s_struc.lattice
)
sub_Cs_on_I_sublattice2 = PeriodicSite("Cs", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
sub_Rb_on_I_sublattice2 = PeriodicSite("Rb", identical_I_vacs_sublattice2[0].site.frac_coords, s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial substitution test
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong specie)
Substitution(s_struc, sub_Cs_on_I_sublattice2),
Substitution(s_struc, sub_Rb_on_I_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different subs (wrong sublattice)
Substitution(s_struc, sub_Cs_on_I_sublattice1_set1),
Substitution(s_struc, sub_Cs_on_I_sublattice2),
)
)
# test symmorphic interstitial matching
# (using set generated from Voronoi generator, with same sublattice given by saturated_
# interstitial_structure function)
inter_H_sublattice1_set1 = PeriodicSite("H", [0.0, 0.75, 0.25], s_struc.lattice)
inter_H_sublattice1_set2 = PeriodicSite("H", [0.0, 0.75, 0.75], s_struc.lattice)
inter_H_sublattice2 = PeriodicSite("H", [0.57796112, 0.06923687, 0.56923687], s_struc.lattice)
inter_H_sublattice3 = PeriodicSite("H", [0.25, 0.25, 0.54018268], s_struc.lattice)
inter_He_sublattice3 = PeriodicSite("He", [0.25, 0.25, 0.54018268], s_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong sublattice)
Interstitial(s_struc, inter_H_sublattice1_set1),
Interstitial(s_struc, inter_H_sublattice3),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(s_struc, inter_H_sublattice3),
Interstitial(s_struc, inter_He_sublattice3),
)
)
# test non-symmorphic interstitial matching
# (using set generated from Voronoi generator, with same sublattice given by
# saturated_interstitial_structure function)
ns_struc = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "CuCl.cif"))
ns_inter_H_sublattice1_set1 = PeriodicSite("H", [0.06924513, 0.06308959, 0.86766528], ns_struc.lattice)
ns_inter_H_sublattice1_set2 = PeriodicSite("H", [0.43691041, 0.36766528, 0.06924513], ns_struc.lattice)
ns_inter_H_sublattice2 = PeriodicSite("H", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
ns_inter_He_sublattice2 = PeriodicSite("He", [0.06022109, 0.60196031, 0.1621814], ns_struc.lattice)
self.assertTrue(
pdc.are_equal( # trivial interstitial test
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal( # same sublattice, different coords
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
Interstitial(ns_struc, ns_inter_H_sublattice1_set2),
)
)
self.assertFalse(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# different interstitials (wrong sublattice)
Interstitial(ns_struc, ns_inter_H_sublattice2),
)
)
self.assertFalse(
pdc.are_equal( # different interstitials (wrong specie)
Interstitial(ns_struc, ns_inter_H_sublattice2),
Interstitial(ns_struc, ns_inter_He_sublattice2),
)
)
# test influence of charge on defect matching (default is to be charge agnostic)
vac_diff_chg = identical_Cs_vacs[0].copy()
vac_diff_chg.set_charge(3.0)
self.assertTrue(pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
chargecheck_pdc = PointDefectComparator(check_charge=True) # switch to PDC which cares about charge state
self.assertFalse(chargecheck_pdc.are_equal(identical_Cs_vacs[0], vac_diff_chg))
# test different supercell size
# (comparing same defect but different supercells - default is to not check for this)
sc_agnostic_pdc = PointDefectComparator(check_primitive_cell=True)
sc_scaled_s_struc = s_struc.copy()
sc_scaled_s_struc.make_supercell([2, 2, 3])
sc_scaled_I_vac_sublatt1_ps1 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[0].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_ps2 = PeriodicSite(
"I",
identical_I_vacs_sublattice1[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt2_ps = PeriodicSite(
"I",
identical_I_vacs_sublattice2[1].site.coords,
sc_scaled_s_struc.lattice,
coords_are_cartesian=True,
)
sc_scaled_I_vac_sublatt1_defect1 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps1)
sc_scaled_I_vac_sublatt1_defect2 = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt1_ps2)
sc_scaled_I_vac_sublatt2_defect = Vacancy(sc_scaled_s_struc, sc_scaled_I_vac_sublatt2_ps)
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect site but between different supercells
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[1],
# same coords, different lattice structure
sc_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[1], sc_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same sublattice, different coords
sc_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], sc_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
sc_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defects (wrong sublattice)
sc_scaled_I_vac_sublatt2_defect,
)
)
# test same structure size, but scaled lattice volume
# (default is to not allow these to be equal, but check_lattice_scale=True allows for this)
vol_agnostic_pdc = PointDefectComparator(check_lattice_scale=True)
vol_scaled_s_struc = s_struc.copy()
vol_scaled_s_struc.scale_lattice(s_struc.volume * 0.95)
vol_scaled_I_vac_sublatt1_defect1 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[4])
vol_scaled_I_vac_sublatt1_defect2 = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[5])
vol_scaled_I_vac_sublatt2_defect = Vacancy(vol_scaled_s_struc, vol_scaled_s_struc[6])
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# trivially same defect (but vol change)
vol_scaled_I_vac_sublatt1_defect1,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect1))
self.assertFalse(
pdc.are_equal(
identical_I_vacs_sublattice1[0],
# same defect, different sublattice point (and vol change)
vol_scaled_I_vac_sublatt1_defect2,
)
)
self.assertTrue(vol_agnostic_pdc.are_equal(identical_I_vacs_sublattice1[0], vol_scaled_I_vac_sublatt1_defect2))
self.assertFalse(
vol_agnostic_pdc.are_equal(
identical_I_vacs_sublattice1[0],
# different defect (wrong sublattice)
vol_scaled_I_vac_sublatt2_defect,
)
)
# test identical defect which has had entire lattice shifted
shift_s_struc = s_struc.copy()
shift_s_struc.translate_sites(range(len(s_struc)), [0.2, 0.3, 0.4], frac_coords=True, to_unit_cell=True)
shifted_identical_Cs_vacs = [
Vacancy(shift_s_struc, shift_s_struc[0]),
Vacancy(shift_s_struc, shift_s_struc[1]),
]
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but shifted)
shifted_identical_Cs_vacs[0],
)
)
self.assertTrue(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and shifted)
shifted_identical_Cs_vacs[1],
)
)
# test uniform lattice shift within non-symmorphic structure
shift_ns_struc = ns_struc.copy()
shift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
shift_ns_inter_H_sublattice1_set1 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set1.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
shift_ns_inter_H_sublattice1_set2 = PeriodicSite(
"H",
ns_inter_H_sublattice1_set2.frac_coords + [0.0, 0.6, 0.3],
shift_ns_struc.lattice,
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# trivially same defect (but shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set1),
)
)
self.assertTrue(
pdc.are_equal(
Interstitial(ns_struc, ns_inter_H_sublattice1_set1),
# same defect on different sublattice point (and shifted)
Interstitial(shift_ns_struc, shift_ns_inter_H_sublattice1_set2),
)
)
# test a rotational + supercell type structure transformation (requires check_primitive_cell=True)
rotated_s_struc = s_struc.copy()
rotated_s_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
rotated_identical_Cs_vacs = [
Vacancy(rotated_s_struc, rotated_s_struc[0]),
Vacancy(rotated_s_struc, rotated_s_struc[1]),
]
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# trivially same defect (but rotated)
rotated_identical_Cs_vacs[0],
)
)
self.assertTrue(sc_agnostic_pdc.are_equal(identical_Cs_vacs[0], rotated_identical_Cs_vacs[0]))
self.assertFalse(
pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice (and rotated)
rotated_identical_Cs_vacs[1],
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
identical_Cs_vacs[0],
# same defect on different sublattice point (and rotated)
rotated_identical_Cs_vacs[1],
)
)
# test a rotational + supercell + shift type structure transformation for non-symmorphic structure
rotANDshift_ns_struc = ns_struc.copy()
rotANDshift_ns_struc.translate_sites(range(len(ns_struc)), [0.0, 0.6, 0.3], frac_coords=True, to_unit_cell=True)
rotANDshift_ns_struc.make_supercell([[2, 1, 0], [-1, 3, 0], [0, 0, 2]])
ns_vac_Cs_set1 = Vacancy(ns_struc, ns_struc[0])
rotANDshift_ns_vac_Cs_set1 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[0])
rotANDshift_ns_vac_Cs_set2 = Vacancy(rotANDshift_ns_struc, rotANDshift_ns_struc[1])
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# trivially same defect (but rotated and sublattice shifted)
rotANDshift_ns_vac_Cs_set1,
)
)
self.assertTrue(
sc_agnostic_pdc.are_equal(
ns_vac_Cs_set1,
# same defect on different sublattice point (shifted and rotated)
rotANDshift_ns_vac_Cs_set2,
)
)
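# --- Illustrative summary (not part of the original test suite) -------------
# The flags exercised above control what PointDefectComparator treats as equal;
# a short sketch of the combinations used in this test:
#
#     pdc = PointDefectComparator()                     # default: charge agnostic, same bulk cell required
#     PointDefectComparator(check_charge=True)          # additionally require equal charge states
#     PointDefectComparator(check_primitive_cell=True)  # allow matches across supercell/rotation changes
#     PointDefectComparator(check_lattice_scale=True)   # allow matches across uniform volume scaling
#
#     pdc.are_equal(defect_a, defect_b)                 # -> bool
# -----------------------------------------------------------------------------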
if __name__ == "__main__":
unittest.main()
| mit |
undoware/neutron-drive | google_appengine/google/appengine/api/files/gs.py | 3 | 11379 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Storage specific Files API calls."""
from __future__ import with_statement
__all__ = ['create']
import os
import re
from urllib import urlencode
from xml.dom import minidom
from google.appengine.api import app_identity
from google.appengine.api import urlfetch
from google.appengine.api.files import file as files
from google.appengine.api.files import file_service_pb
_GS_FILESYSTEM = files.GS_FILESYSTEM
_GS_PREFIX = '/' + _GS_FILESYSTEM + '/'
_MIME_TYPE_PARAMETER = 'content_type'
_CANNED_ACL_PARAMETER = 'acl'
_CONTENT_ENCODING_PARAMETER = 'content_encoding'
_CONTENT_DISPOSITION_PARAMETER = 'content_disposition'
_CACHE_CONTROL_PARAMETER = 'cache_control'
_USER_METADATA_PREFIX = 'x-goog-meta-'
_GS_RESTFUL_URL = 'commondatastorage.googleapis.com'
_GS_RESTFUL_SCOPE_READ_ONLY = (
'https://www.googleapis.com/auth/devstorage.read_only')
_GS_RESTFUL_API_VERSION = '2'
_GS_BUCKETPATH_REGEX = re.compile(r'/gs/[a-z0-9\.\-_]{3,}$')
_GS_FILEPATH_REGEX = re.compile(r'/gs/[a-z0-9\.\-_]{3,}')
def parseGlob(filename):
"""Parse a Gs filename or a filename pattern. Handle escape of '*' and '/'.
Args:
filename: a filename or filename pattern.
filename must be a valid gs filepath in the format of
'/gs/bucket/filename'. filename pattern has format '/gs/bucket/prefix*'.
filename pattern represents filenames with the given prefix in the bucket.
Please escape '*' and '\' with '\' if your filename contains them. We
recommend using Python raw string to simplify escape expressions.
Returns:
A (string, string) tuple if filename is a pattern. The first string is
the bucket name, second is the prefix or '' if prefix doesn't exist.
Properly escaped filename if filename is not a pattern.
example
'/gs/bucket1/file1' => '/gs/bucket1/file1'
'/gs/bucket2/*' => ('/gs/bucket2', '') all files under bucket2
'/gs/bucket3/p*' => ('/gs/bucket3', 'p') files under bucket3 with
a prefix 'p' in its name
r'/gs/bucket/file\*' => '/gs/bucket/file*'
r'/gs/bucket/file\\*' => ('/gs/bucket', r'file\') all files under bucket
with prefix r'file\'
r'/gs/bucket/file\\\*' => '/gs/bucket/file\*'
r'/gs/bucket/file\**' => ('/gs/bucket', 'file*') all files under bucket
with prefix 'file*'
Raises:
google.appengine.api.files.InvalidFileNameError if filename is illegal.
"""
if not filename:
raise files.InvalidFileNameError('filename is None.')
if not isinstance(filename, basestring):
raise files.InvalidFileNameError('filename %s should be of type string' %
filename)
match = _GS_FILEPATH_REGEX.match(filename)
if not match:
raise files.InvalidFileNameError(
'filename %s should start with /gs/bucketname' % filename)
bucketname = match.group(0)
rest = filename[len(bucketname):]
if not rest or (len(rest) == 1 and rest[0] == '/'):
return bucketname, ''
if not rest.startswith('/'):
raise files.InvalidFileNameError(
'Expect / to separate bucketname and filename in %s' % filename)
i = 1
prefix = False
processed = ''
while i < len(rest):
char = rest[i]
if char == '\\':
if i + 1 == len(rest):
processed += char
else:
processed += rest[i + 1]
i += 1
elif char == '*':
if i + 1 != len(rest):
raise files.InvalidFileNameError('* as a wildcard is not the last.')
prefix = True
else:
processed += char
i += 1
if prefix:
return bucketname, processed
else:
return bucketname + '/' + processed
def listdir(path, kwargs=None):
"""Return a sorted list of filenames (matching a pattern) in the given path.
Sorting (in decreasing string order) is done automatically by Google Cloud Storage.
Args:
path: a Google Cloud Storage path of "/gs/bucketname" form.
kwargs: other keyword arguments to be relayed to Google Cloud Storage.
This can be used to select certain files with names matching a pattern.
Supported keywords:
marker: a string after which (exclusive) to start listing.
max_keys: the maximum number of filenames to return.
prefix: limits the returned filenames to those with this prefix. No regex is supported.
See Google Cloud Storage documentation for more details and examples.
https://developers.google.com/storage/docs/reference-methods#getbucket
Returns:
a sorted list containing filenames (matching a pattern) from
the given path. The last filename can be used as a marker for another
request for more files.
"""
if not path:
raise files.InvalidFileNameError('Empty path')
elif not isinstance(path, basestring):
raise files.InvalidFileNameError('Expected string for path %s' % path)
elif not _GS_BUCKETPATH_REGEX.match(path):
raise files.InvalidFileNameError(
'Google storage path must have the form /gs/bucketname')
if kwargs and kwargs.has_key('max_keys'):
kwargs['max-keys'] = kwargs['max_keys']
kwargs.pop('max_keys')
if not os.environ.get('DATACENTER'):
return _listdir_local(path, kwargs)
bucketname = path[len(_GS_PREFIX):]
request_headers = {
'Authorization': 'OAuth %s' % app_identity.get_access_token(
_GS_RESTFUL_SCOPE_READ_ONLY)[0],
'x-goog-api-version': _GS_RESTFUL_API_VERSION
}
url = 'https://%s/%s' % (_GS_RESTFUL_URL, bucketname)
if kwargs:
url += '/?' + urlencode(kwargs)
response = urlfetch.fetch(url=url,
headers=request_headers,
deadline=60)
if response.status_code == 404:
raise files.InvalidFileNameError('Bucket %s does not exist.' % bucketname)
elif response.status_code == 401:
raise files.PermissionDeniedError('Permission denied to read bucket %s.' %
bucketname)
dom = minidom.parseString(response.content)
def __textValue(node):
return node.firstChild.nodeValue
error = dom.getElementsByTagName('Error')
if len(error) == 1:
details = error[0].getElementsByTagName('Details')
if len(details) == 1:
raise files.InvalidParameterError(__textValue(details[0]))
else:
code = __textValue(error[0].getElementsByTagName('Code')[0])
msg = __textValue(error[0].getElementsByTagName('Message')[0])
raise files.InvalidParameterError('%s: %s' % (code, msg))
return ['/'.join([path, __textValue(key)]) for key in
dom.getElementsByTagName('Key')]
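# --- Illustrative sketch (not part of the original module) ------------------
# Listing a bucket with listdir(), assuming this module is importable as
# google.appengine.api.files.gs; '/gs/my_bucket' and the prefix are placeholders:
#
#     from google.appengine.api.files import gs
#     names = gs.listdir('/gs/my_bucket',
#                        {'prefix': 'logs/2012-', 'max_keys': 100})
#     # names is a sorted list of full paths like '/gs/my_bucket/logs/2012-01-01'.
#     # Per the docstring above, the last returned filename can be fed back via
#     # the 'marker' keyword to continue listing where the previous call stopped.
# -----------------------------------------------------------------------------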
def _listdir_local(path, kwargs):
"""Dev app server version of listdir.
See listdir for doc.
"""
request = file_service_pb.ListDirRequest()
response = file_service_pb.ListDirResponse()
request.set_path(path)
if kwargs and kwargs.has_key('marker'):
request.set_marker(kwargs['marker'])
if kwargs and kwargs.has_key('max-keys'):
request.set_max_keys(kwargs['max-keys'])
if kwargs and kwargs.has_key('prefix'):
request.set_prefix(kwargs['prefix'])
files._make_call('ListDir', request, response)
return response.filenames_list()
def create(filename,
mime_type='application/octet-stream',
acl=None,
cache_control=None,
content_encoding=None,
content_disposition=None,
user_metadata=None):
"""Create a writable googlestore file.
Args:
filename: Google Storage object name (/gs/bucket/object)
mime_type: Blob content MIME type as string.
acl: Canned acl to apply to the object as per:
http://code.google.com/apis/storage/docs/reference-headers.html#xgoogacl
If not specified (or set to None), default object acl is used.
cache_control: Cache control header to set when serving through Google
storage. If not specified, default of 3600 seconds is used.
content_encoding: If object is compressed, specify the compression method
here to set the header correctly when served through Google Storage.
content_disposition: Header to use when serving through Google Storage.
user_metadata: Dictionary specifying key value pairs to apply to the
object. Each key is prefixed with x-goog-meta- when served through
Google Storage.
Returns:
A writable file name for a Google Storage file. This file can be opened for
write by File API open function. To read the file call file::open with the
plain Google Storage filename (/gs/bucket/object).
"""
if not filename:
raise files.InvalidArgumentError('Empty filename')
elif not isinstance(filename, basestring):
raise files.InvalidArgumentError('Expected string for filename', filename)
elif not filename.startswith(_GS_PREFIX) or filename == _GS_PREFIX:
raise files.InvalidArgumentError(
'Google storage files must be of the form /gs/bucket/object', filename)
elif not mime_type:
raise files.InvalidArgumentError('Empty mime_type')
elif not isinstance(mime_type, basestring):
raise files.InvalidArgumentError('Expected string for mime_type', mime_type)
params = {_MIME_TYPE_PARAMETER: mime_type}
if acl:
if not isinstance(acl, basestring):
raise files.InvalidArgumentError('Expected string for acl', acl)
params[_CANNED_ACL_PARAMETER] = acl
if content_encoding:
if not isinstance(content_encoding, basestring):
raise files.InvalidArgumentError('Expected string for content_encoding')
else:
params[_CONTENT_ENCODING_PARAMETER] = content_encoding
if content_disposition:
if not isinstance(content_disposition, basestring):
raise files.InvalidArgumentError(
'Expected string for content_disposition')
else:
params[_CONTENT_DISPOSITION_PARAMETER] = content_disposition
if cache_control:
if not isinstance(cache_control, basestring):
raise files.InvalidArgumentError('Expected string for cache_control')
else:
params[_CACHE_CONTROL_PARAMETER] = cache_control
if user_metadata:
if not isinstance(user_metadata, dict):
raise files.InvalidArgumentError('Expected dict for user_metadata')
for key, value in user_metadata.items():
if not isinstance(key, basestring):
raise files.InvalidArgumentError(
'Expected string for key in user_metadata')
if not isinstance(value, basestring):
raise files.InvalidArgumentError(
'Expected string for value in user_metadata for key: ', key)
params[_USER_METADATA_PREFIX + key] = value
return files._create(_GS_FILESYSTEM, filename=filename, params=params)
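# --- Illustrative sketch (not part of the original module) ------------------
# A typical write path combining create() with the Files API helpers referenced
# in the docstring above (files.open / files.finalize). Bucket and object names
# are placeholders:
#
#     from google.appengine.api import files
#
#     writable = files.gs.create('/gs/my_bucket/report.csv',
#                                mime_type='text/csv',
#                                acl='public-read')
#     with files.open(writable, 'a') as f:
#       f.write('col_a,col_b\n')
#     files.finalize(writable)
#     # After finalizing, read back using the plain Google Storage name:
#     with files.open('/gs/my_bucket/report.csv', 'r') as f:
#       data = f.read()
# -----------------------------------------------------------------------------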
def default_bucket_name():
"""Obtain the default Google Storage bucket name for this application.
Returns:
A string that is the name of the default bucket.
"""
return files._default_gs_bucket_name()
| bsd-3-clause |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/django/contrib/gis/geos/collections.py | 292 | 4986 | """
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
import json
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import (
GEOSGeometry, ProjectInterpolateMixin,
)
from django.contrib.gis.geos.libgeos import get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
from django.utils.six.moves import range
class GeometryCollection(GEOSGeometry):
_typeid = 7
def __init__(self, *args, **kwargs):
"Initializes a Geometry Collection from a sequence of Geometry objects."
# Checking the arguments
if not args:
raise TypeError('Must provide at least one Geometry to initialize %s.' % self.__class__.__name__)
if len(args) == 1:
# If only one geometry provided or a list of geometries is provided
# in the first argument.
if isinstance(args[0], (tuple, list)):
init_geoms = args[0]
else:
init_geoms = args
else:
init_geoms = args
# Ensuring that only the permitted geometries are allowed in this collection
# this is moved to list mixin super class
self._check_allowed(init_geoms)
# Creating the geometry pointer array.
collection = self._create_collection(len(init_geoms), iter(init_geoms))
super(GeometryCollection, self).__init__(collection, **kwargs)
def __iter__(self):
"Iterates over each Geometry in the Collection."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of geometries in this Collection."
return self.num_geom
# ### Methods for compatibility with ListMixin ###
def _create_collection(self, length, items):
# Creating the geometry pointer array.
geoms = get_pointer_arr(length)
for i, g in enumerate(items):
# this is a little sloppy, but makes life easier
# allow GEOSGeometry types (python wrappers) or pointer types
geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
def _get_single_internal(self, index):
return capi.get_geomn(self.ptr, index)
def _get_single_external(self, index):
"Returns the Geometry from this Collection at the given index (0-based)."
# Checking the index and returning the corresponding GEOS geometry.
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
def _set_list(self, length, items):
"Create a new collection, and destroy the contents of the previous pointer."
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_collection(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
@property
def json(self):
if self.__class__.__name__ == 'GeometryCollection':
return json.dumps({
'type': self.__class__.__name__,
'geometries': [
{'type': geom.__class__.__name__, 'coordinates': geom.coords}
for geom in self
],
})
return super(GeometryCollection, self).json
geojson = json
@property
def kml(self):
"Returns the KML for this Geometry Collection."
return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)
@property
def tuple(self):
"Returns a tuple of all the coordinates in this Geometry Collection"
return tuple(g.tuple for g in self)
coords = tuple
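# A minimal usage sketch (not part of this module): building collections and
# reading the serialization properties defined above. Assumes GEOS is installed
# and django.contrib.gis.geos is importable.
#
#     from django.contrib.gis.geos import Point, MultiPoint, GeometryCollection
#
#     mp = MultiPoint(Point(0, 0), Point(1, 1))
#     gc = GeometryCollection(mp, Point(2, 2))
#     gc.tuple        # nested coordinate tuples
#     gc.json         # GeoJSON string with a 'geometries' list
#     gc.kml          # '<MultiGeometry>...</MultiGeometry>'
#     len(gc), gc[0]  # collections are sized and indexable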
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
_allowed = Point
_typeid = 4
class MultiLineString(ProjectInterpolateMixin, GeometryCollection):
_allowed = (LineString, LinearRing)
_typeid = 5
@property
def merged(self):
"""
Returns a LineString representing the line merge of this
MultiLineString.
"""
return self._topology(capi.geos_linemerge(self.ptr))
class MultiPolygon(GeometryCollection):
_allowed = Polygon
_typeid = 6
@property
def cascaded_union(self):
"Returns a cascaded union of this MultiPolygon."
return GEOSGeometry(capi.geos_cascaded_union(self.ptr), self.srid)
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
| mit |
sukiand/idapython | examples/ex_cli.py | 16 | 3448 | # -----------------------------------------------------------------------
# This is an example illustrating how to implement a CLI
# (c) Hex-Rays
#
from idaapi import NW_OPENIDB, NW_CLOSEIDB, NW_TERMIDA, NW_REMOVE, COLSTR, cli_t
#<pycode(ex_cli_ex1)>
class mycli_t(cli_t):
flags = 0
sname = "pycli"
lname = "Python CLI"
hint = "pycli hint"
def OnExecuteLine(self, line):
"""
The user pressed Enter. The CLI is free to execute the line immediately or ask for more lines.
This callback is mandatory.
@param line: typed line(s)
@return Boolean: True-executed line, False-ask for more lines
"""
print "OnExecute:", line
return True
def OnKeydown(self, line, x, sellen, vkey, shift):
"""
A keyboard key has been pressed
This is a generic callback and the CLI is free to do whatever it wants.
This callback is optional.
@param line: current input line
@param x: current x coordinate of the cursor
@param sellen: current selection length (usually 0)
@param vkey: virtual key code. if the key has been handled, it should be returned as zero
@param shift: shift state
@return:
None - Nothing was changed
tuple(line, x, sellen, vkey): if either of the input line or the x coordinate or the selection length has been modified.
It is possible to return a tuple with None elements to preserve old values. Example: tuple(new_line, None, None, None) or tuple(new_line)
"""
print "Onkeydown: line=%s x=%d sellen=%d vkey=%d shift=%d" % (line, x, sellen, vkey, shift)
return None
def OnCompleteLine(self, prefix, n, line, prefix_start):
"""
The user pressed Tab. Find a completion number N for prefix PREFIX
This callback is optional.
@param prefix: Line prefix at prefix_start (string)
@param n: completion number (int)
@param line: the current line (string)
@param prefix_start: the index where PREFIX starts in LINE (int)
@return: None if no completion could be generated otherwise a String with the completion suggestion
"""
print "OnCompleteLine: prefix=%s n=%d line=%s prefix_start=%d" % (prefix, n, line, prefix_start)
return None
#</pycode(ex_cli_ex1)>
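# A small sketch (not part of the original example): OnExecuteLine() may return
# False to keep collecting input, e.g. to support trailing-backslash
# continuations before executing the accumulated text:
#
#     def OnExecuteLine(self, line):
#         if line.rstrip().endswith("\\"):
#             return False        # ask the CLI for more lines
#         print "Executing:", line
#         return True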
# -----------------------------------------------------------------------
def nw_handler(code, old=0):
if code == NW_OPENIDB:
print "nw_handler(): installing CLI"
mycli.register()
elif code == NW_CLOSEIDB:
print "nw_handler(): removing CLI"
mycli.unregister()
elif code == NW_TERMIDA:
print "nw_handler(): uninstalled nw handler"
idaapi.notify_when(NW_TERMIDA | NW_OPENIDB | NW_CLOSEIDB | NW_REMOVE, nw_handler)
# -----------------------------------------------------------------------
# Already installed?
try:
mycli
# remove previous CLI
mycli.unregister()
del mycli
# remove previous handler
nw_handler(NW_TERMIDA)
except:
pass
finally:
mycli = mycli_t()
# register CLI
if mycli.register():
print "CLI installed"
# install new handler
idaapi.notify_when(NW_TERMIDA | NW_OPENIDB | NW_CLOSEIDB, nw_handler)
else:
del mycli
print "Failed to install CLI"
| bsd-3-clause |
disruptek/boto | boto/sdb/db/__init__.py | 189 | 1108 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| mit |
papouso/odoo | addons/hr_payroll_account/hr_payroll_account.py | 240 | 10840 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import date, datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
class hr_payslip(osv.osv):
'''
Pay Slip
'''
_inherit = 'hr.payslip'
_description = 'Pay Slip'
_columns = {
'period_id': fields.many2one('account.period', 'Force Period',states={'draft': [('readonly', False)]}, readonly=True, domain=[('state','<>','done')], help="Keep empty to use the period of the validation(Payslip) date."),
'journal_id': fields.many2one('account.journal', 'Salary Journal',states={'draft': [('readonly', False)]}, readonly=True, required=True),
'move_id': fields.many2one('account.move', 'Accounting Entry', readonly=True, copy=False),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'journal_id' in context:
vals.update({'journal_id': context.get('journal_id')})
return super(hr_payslip, self).create(cr, uid, vals, context=context)
def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
contract_obj = self.pool.get('hr.contract')
res = super(hr_payslip, self).onchange_contract_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
journal_id = contract_id and contract_obj.browse(cr, uid, contract_id, context=context).journal_id.id or False
res['value'].update({'journal_id': journal_id})
return res
def cancel_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
move_ids = []
move_to_cancel = []
for slip in self.browse(cr, uid, ids, context=context):
if slip.move_id:
move_ids.append(slip.move_id.id)
if slip.move_id.state == 'posted':
move_to_cancel.append(slip.move_id.id)
move_pool.button_cancel(cr, uid, move_to_cancel, context=context)
move_pool.unlink(cr, uid, move_ids, context=context)
return super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context)
def process_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
period_pool = self.pool.get('account.period')
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Payroll')
timenow = time.strftime('%Y-%m-%d')
for slip in self.browse(cr, uid, ids, context=context):
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
if not slip.period_id:
search_periods = period_pool.find(cr, uid, slip.date_to, context=context)
period_id = search_periods[0]
else:
period_id = slip.period_id.id
default_partner_id = slip.employee_id.address_home_id.id
name = _('Payslip of %s') % (slip.employee_id.name)
move = {
'narration': name,
'date': timenow,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'period_id': period_id,
}
for line in slip.details_by_salary_rule_category:
amt = slip.credit_note and -line.total or line.total
if float_is_zero(amt, precision_digits=precision):
continue
partner_id = line.salary_rule_id.register_id.partner_id and line.salary_rule_id.register_id.partner_id.id or default_partner_id
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_debit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt > 0.0 and amt or 0.0,
'credit': amt < 0.0 and -amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_credit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt < 0.0 and -amt or 0.0,
'credit': amt > 0.0 and amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if float_compare(credit_sum, debit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Credit Account!')%(slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': 0.0,
'credit': debit_sum - credit_sum,
})
line_ids.append(adjust_credit)
elif float_compare(debit_sum, credit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Debit Account!')%(slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': credit_sum - debit_sum,
'credit': 0.0,
})
line_ids.append(adjust_debit)
move.update({'line_id': line_ids})
move_id = move_pool.create(cr, uid, move, context=context)
self.write(cr, uid, [slip.id], {'move_id': move_id, 'period_id' : period_id}, context=context)
if slip.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context=context)
return super(hr_payslip, self).process_sheet(cr, uid, [slip.id], context=context)
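# Illustrative note (not part of the original module): process_sheet() keeps the
# generated account.move balanced. A small numeric sketch of the adjustment
# branch above, using the same float_compare helper:
#
#     debit_sum, credit_sum = 1000.00, 950.00
#     if float_compare(credit_sum, debit_sum, precision_digits=2) == -1:
#         adjustment = debit_sum - credit_sum   # 50.00, booked as a credit line
#                                               # on the journal's default credit account
#     # The symmetric branch books a debit adjustment when credit_sum exceeds
#     # debit_sum, using the journal's default debit account.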
class hr_salary_rule(osv.osv):
_inherit = 'hr.salary.rule'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'account_tax_id':fields.many2one('account.tax.code', 'Tax Code'),
'account_debit': fields.many2one('account.account', 'Debit Account'),
'account_credit': fields.many2one('account.account', 'Credit Account'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_description = 'Employee Contract'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'journal_id': fields.many2one('account.journal', 'Salary Journal'),
}
class hr_payslip_run(osv.osv):
_inherit = 'hr.payslip.run'
_description = 'Payslip Run'
_columns = {
'journal_id': fields.many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True, required=True),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cristianquaglio/odoo | addons/hr_payroll_account/hr_payroll_account.py | 52 | 10905 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import date, datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
class hr_payslip(osv.osv):
'''
Pay Slip
'''
_inherit = 'hr.payslip'
_description = 'Pay Slip'
_columns = {
'period_id': fields.many2one('account.period', 'Force Period',states={'draft': [('readonly', False)]}, readonly=True, domain=[('state','<>','done')], help="Keep empty to use the period of the validation(Payslip) date."),
'journal_id': fields.many2one('account.journal', 'Salary Journal',states={'draft': [('readonly', False)]}, readonly=True, required=True),
'move_id': fields.many2one('account.move', 'Accounting Entry', readonly=True, copy=False),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'journal_id' in context:
vals.update({'journal_id': context.get('journal_id')})
return super(hr_payslip, self).create(cr, uid, vals, context=context)
def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
contract_obj = self.pool.get('hr.contract')
res = super(hr_payslip, self).onchange_contract_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
journal_id = contract_id and contract_obj.browse(cr, uid, contract_id, context=context).journal_id.id or (not contract_id and self._get_default_journal(cr, uid, context=None))
res['value'].update({'journal_id': journal_id})
return res
def cancel_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
move_ids = []
move_to_cancel = []
for slip in self.browse(cr, uid, ids, context=context):
if slip.move_id:
move_ids.append(slip.move_id.id)
if slip.move_id.state == 'posted':
move_to_cancel.append(slip.move_id.id)
move_pool.button_cancel(cr, uid, move_to_cancel, context=context)
move_pool.unlink(cr, uid, move_ids, context=context)
return super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context)
def process_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
period_pool = self.pool.get('account.period')
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Payroll')
timenow = time.strftime('%Y-%m-%d')
for slip in self.browse(cr, uid, ids, context=context):
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
if not slip.period_id:
search_periods = period_pool.find(cr, uid, slip.date_to, context=context)
period_id = search_periods[0]
else:
period_id = slip.period_id.id
default_partner_id = slip.employee_id.address_home_id.id
name = _('Payslip of %s') % (slip.employee_id.name)
move = {
'narration': name,
'date': timenow,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'period_id': period_id,
}
for line in slip.details_by_salary_rule_category:
amt = slip.credit_note and -line.total or line.total
if float_is_zero(amt, precision_digits=precision):
continue
partner_id = line.salary_rule_id.register_id.partner_id and line.salary_rule_id.register_id.partner_id.id or default_partner_id
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_debit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt > 0.0 and amt or 0.0,
'credit': amt < 0.0 and -amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'date': timenow,
'partner_id': (line.salary_rule_id.register_id.partner_id or line.salary_rule_id.account_credit.type in ('receivable', 'payable')) and partner_id or False,
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': amt < 0.0 and -amt or 0.0,
'credit': amt > 0.0 and amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_code_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
'tax_amount': line.salary_rule_id.account_tax_id and amt or 0.0,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if float_compare(credit_sum, debit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Credit Account!')%(slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': 0.0,
'credit': debit_sum - credit_sum,
})
line_ids.append(adjust_credit)
elif float_compare(debit_sum, credit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise osv.except_osv(_('Configuration Error!'),_('The Expense Journal "%s" has not properly configured the Debit Account!')%(slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'period_id': period_id,
'debit': credit_sum - debit_sum,
'credit': 0.0,
})
line_ids.append(adjust_debit)
move.update({'line_id': line_ids})
move_id = move_pool.create(cr, uid, move, context=context)
self.write(cr, uid, [slip.id], {'move_id': move_id, 'period_id' : period_id}, context=context)
if slip.journal_id.entry_posted:
move_pool.post(cr, uid, [move_id], context=context)
return super(hr_payslip, self).process_sheet(cr, uid, [slip.id], context=context)
class hr_salary_rule(osv.osv):
_inherit = 'hr.salary.rule'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'account_tax_id':fields.many2one('account.tax.code', 'Tax Code'),
'account_debit': fields.many2one('account.account', 'Debit Account'),
'account_credit': fields.many2one('account.account', 'Credit Account'),
}
class hr_contract(osv.osv):
_inherit = 'hr.contract'
_description = 'Employee Contract'
_columns = {
'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account'),
'journal_id': fields.many2one('account.journal', 'Salary Journal'),
}
class hr_payslip_run(osv.osv):
_inherit = 'hr.payslip.run'
_description = 'Payslip Run'
_columns = {
'journal_id': fields.many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True, required=True),
}
def _get_default_journal(self, cr, uid, context=None):
model_data = self.pool.get('ir.model.data')
res = model_data.search(cr, uid, [('name', '=', 'expenses_journal')])
if res:
return model_data.browse(cr, uid, res[0]).res_id
return False
_defaults = {
'journal_id': _get_default_journal,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| apache-2.0 |
pilou-/ansible | lib/ansible/modules/cloud/xenserver/xenserver_guest.py | 7 | 97662 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Bojan Vitnik <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: xenserver_guest
short_description: Manages virtual machines running on Citrix XenServer host or pool
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
modify various virtual machine components like network and disk, rename a virtual machine and
remove a virtual machine with associated components.
version_added: '2.8'
author:
- Bojan Vitnik (@bvitnik) <[email protected]>
notes:
- Minimal supported version of XenServer is 5.6.
- Module was tested with XenServer 6.5, 7.1 and 7.2.
- 'XenAPI Python library can be acquired from XenServer SDK (downloadable from Citrix website) or by running C(pip install XenAPI) (possibly very old
version, not compatible with Python 3.x). Latest version can also be acquired from GitHub:
https://raw.githubusercontent.com/xapi-project/xen-api/master/scripts/examples/python/XenAPI.py'
- 'If no scheme is specified in C(hostname), module defaults to C(http://) because C(https://) is problematic in most setups. Make sure you are
accessing XenServer host in trusted environment or use C(https://) scheme explicitly.'
- 'To use C(https://) scheme for C(hostname) you have to either import host certificate to your OS certificate store or use C(validate_certs: no)
which requires XenAPI library from XenServer 7.2 SDK or newer and Python 2.7.9 or newer.'
- 'Network configuration inside a guest OS, by using C(networks.type), C(networks.ip), C(networks.gateway) etc. parameters, is supported on
XenServer 7.0 or newer for Windows guests by using official XenServer Guest agent support for network configuration. The module will try to
detect if such support is available and utilize it, else it will use a custom method of configuration via xenstore. Since XenServer Guest
agent only supports None and Static types of network configuration, where None means a DHCP configured interface, C(networks.type) and C(networks.type6)
values C(none) and C(dhcp) have the same effect.
More info here: https://xenserver.org/blog/entry/set-windows-guest-vm-static-ip-address-in-xenserver.html'
- 'On platforms without official support for network configuration inside a guest OS, network parameters will be written to xenstore
C(vm-data/networks/<vif_device>) key. Parameters can be inspected by using C(xenstore ls) and C(xenstore read) tools on \*nix guests or through the
WMI interface on Windows guests. They can also be found in VM facts C(instance.xenstore_data) key as returned by the module. It is up to the user
to implement boot time scripts or a custom agent that will read the parameters from xenstore and configure the network with the given parameters.
Take note that for xenstore data to become available inside a guest, a VM restart is needed, hence the module will require a VM restart if any
parameter is changed. This is a limitation of XenAPI and xenstore. Considering these limitations, network configuration through xenstore is most
useful for bootstrapping newly deployed VMs, much less for reconfiguring existing ones.
More info here: https://support.citrix.com/article/CTX226713'
requirements:
- python >= 2.6
- XenAPI
options:
state:
description:
- Specify the state VM should be in.
- If C(state) is set to C(present) and VM exists, ensure the VM configuration conforms to given parameters.
- If C(state) is set to C(present) and VM does not exist, then VM is deployed with given parameters.
- If C(state) is set to C(absent) and VM exists, then VM is removed with its associated components.
- If C(state) is set to C(poweredon) and VM does not exist, then VM is deployed with given parameters and powered on automatically.
type: str
default: present
choices: [ present, absent, poweredon ]
name:
description:
- Name of the VM to work with.
- VMs running on XenServer do not necessarily have unique names. The module will fail if multiple VMs with same name are found.
- In case of multiple VMs with same name, use C(uuid) to uniquely specify VM to manage.
- This parameter is case sensitive.
type: str
required: yes
aliases: [ name_label ]
name_desc:
description:
- VM description.
type: str
uuid:
description:
- UUID of the VM to manage if known. This is XenServer's unique identifier.
- It is required if name is not unique.
- Please note that a supplied UUID will be ignored on VM creation, as XenServer creates the UUID internally.
type: str
template:
description:
- Name of a template, an existing VM (must be shut down) or a snapshot that should be used to create VM.
- Templates/VMs/snapshots on XenServer do not necessarily have unique names. The module will fail if multiple templates with same name are found.
- In case of multiple templates/VMs/snapshots with same name, use C(template_uuid) to uniquely specify source template.
- If VM already exists, this setting will be ignored.
- This parameter is case sensitive.
type: str
aliases: [ template_src ]
template_uuid:
description:
- UUID of a template, an existing VM or a snapshot that should be used to create VM.
- It is required if template name is not unique.
type: str
is_template:
description:
- Convert VM to template.
type: bool
default: no
folder:
description:
- Destination folder for VM.
- This parameter is case sensitive.
- 'Example:'
- ' folder: /folder1/folder2'
type: str
hardware:
description:
- Manage VM's hardware parameters. VM needs to be shut down to reconfigure these parameters.
- 'Valid parameters are:'
- ' - C(num_cpus) (integer): Number of CPUs.'
- ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) has to be a multiple of C(num_cpu_cores_per_socket).'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
type: dict
disks:
description:
- A list of disks to add to VM.
- All parameters are case sensitive.
- Removing or detaching existing disks of VM is not supported.
- 'Required parameters per entry:'
- ' - C(size_[tb,gb,mb,kb,b]) (integer): Disk storage size in specified unit. VM needs to be shut down to reconfigure this parameter.'
- 'Optional parameters per entry:'
- ' - C(name) (string): Disk name. You can also use C(name_label) as an alias.'
- ' - C(name_desc) (string): Disk description.'
- ' - C(sr) (string): Storage Repository to create disk on. If not specified, will use default SR. Cannot be used for moving disk to other SR.'
- ' - C(sr_uuid) (string): UUID of a SR to create disk on. Use if SR name is not unique.'
type: list
aliases: [ disk ]
cdrom:
description:
- A CD-ROM configuration for the VM.
- All parameters are case sensitive.
- 'Valid parameters are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none) or C(iso). With C(none) the CD-ROM device will be present but empty.'
- ' - C(iso_name) (string): The file name of an ISO image from one of the XenServer ISO Libraries (implies C(type: iso)).
Required if C(type) is set to C(iso).'
type: dict
networks:
description:
- A list of networks (in the order of the NICs).
- All parameters are case sensitive.
- 'Required parameters per entry:'
- ' - C(name) (string): Name of a XenServer network to attach the network interface to. You can also use C(name_label) as an alias.'
- 'Optional parameters per entry (used for VM hardware):'
- ' - C(mac) (string): Customize MAC address of the interface.'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IPv4 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
- ' - C(ip) (string): Static IPv4 address (implies C(type: static)). Can include prefix in format <IPv4 address>/<prefix> instead of using C(netmask).'
- ' - C(netmask) (string): Static IPv4 netmask required for C(ip) if prefix is not specified.'
- ' - C(gateway) (string): Static IPv4 gateway.'
- ' - C(type6) (string): Type of IPv6 assignment, valid options are C(none), C(dhcp) or C(static). Value C(none) means whatever is default for OS.
On some operating systems it could be DHCP configured (e.g. Windows) or unconfigured interface (e.g. Linux).'
- ' - C(ip6) (string): Static IPv6 address (implies C(type6: static)) with prefix in format <IPv6 address>/<prefix>.'
- ' - C(gateway6) (string): Static IPv6 gateway.'
type: list
aliases: [ network ]
home_server:
description:
- Name of a XenServer host that will be a Home Server for the VM.
- This parameter is case sensitive.
type: str
custom_params:
description:
- Define a list of custom VM params to set on VM.
- Useful for advanced users familiar with managing VM params through the xe CLI.
- A custom value object takes two fields C(key) and C(value) (see example below).
type: list
wait_for_ip_address:
description:
- Wait until XenServer detects an IP address for the VM. If C(state) is set to C(absent), this parameter is ignored.
- This requires XenServer Tools to be preinstalled on the VM to work properly.
type: bool
default: no
state_change_timeout:
description:
- 'By default, the module will wait indefinitely for the VM to acquire an IP address if C(wait_for_ip_address: yes).'
- If this parameter is set to a positive value, the module will instead wait the specified number of seconds for the state change.
- In case of timeout, module will generate an error message.
type: int
default: 0
linked_clone:
description:
- Whether to create a Linked Clone from the template, existing VM or snapshot. If no, will create a full copy.
- This is equivalent to C(Use storage-level fast disk clone) option in XenCenter.
type: bool
default: no
force:
description:
- Ignore warnings and complete the actions.
- This parameter is useful for removing a VM in running state or reconfiguring VM params that require the VM to be shut down.
type: bool
default: no
extends_documentation_fragment: xenserver.documentation
'''
EXAMPLES = r'''
- name: Create a VM from a template
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: CentOS 7
disks:
- size_gb: 10
sr: my_sr
hardware:
num_cpus: 6
num_cpu_cores_per_socket: 3
memory_mb: 512
cdrom:
type: iso
iso_name: guest-tools.iso
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Create a VM template
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
validate_certs: no
folder: /testvms
name: testvm_6
is_template: yes
disk:
- size_gb: 10
sr: my_sr
hardware:
memory_mb: 512
num_cpus: 1
delegate_to: localhost
register: deploy
- name: Rename a VM (requires the VM's UUID)
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
name: new_name
state: present
delegate_to: localhost
- name: Remove a VM by UUID
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
uuid: 421e4592-c069-924d-ce20-7e7533fab926
state: absent
delegate_to: localhost
- name: Modify custom params (boot order)
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
name: testvm_8
state: present
custom_params:
- key: HVM_boot_params
value: { "order": "ndc" }
delegate_to: localhost
- name: Customize network parameters
xenserver_guest:
hostname: "{{ xenserver_hostname }}"
username: "{{ xenserver_username }}"
password: "{{ xenserver_password }}"
name: testvm_10
networks:
- name: VM Network
ip: 192.168.1.100/24
gateway: 192.168.1.1
- type: dhcp
delegate_to: localhost
'''
RETURN = r'''
instance:
description: Metadata about the VM
returned: always
type: dict
sample: {
"cdrom": {
"type": "none"
},
"customization_agent": "native",
"disks": [
{
"name": "testvm_11-0",
"name_desc": "",
"os_device": "xvda",
"size": 42949672960,
"sr": "Local storage",
"sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
"vbd_userdevice": "0"
},
{
"name": "testvm_11-1",
"name_desc": "",
"os_device": "xvdb",
"size": 42949672960,
"sr": "Local storage",
"sr_uuid": "0af1245e-bdb0-ba33-1446-57a962ec4075",
"vbd_userdevice": "1"
}
],
"domid": "56",
"folder": "",
"hardware": {
"memory_mb": 8192,
"num_cpu_cores_per_socket": 2,
"num_cpus": 4
},
"home_server": "",
"is_template": false,
"name": "testvm_11",
"name_desc": "",
"networks": [
{
"gateway": "192.168.0.254",
"gateway6": "fc00::fffe",
"ip": "192.168.0.200",
"ip6": [
"fe80:0000:0000:0000:e9cb:625a:32c5:c291",
"fc00:0000:0000:0000:0000:0000:0000:0001"
],
"mac": "ba:91:3a:48:20:76",
"mtu": "1500",
"name": "Pool-wide network associated with eth1",
"netmask": "255.255.255.128",
"prefix": "25",
"prefix6": "64",
"vif_device": "0"
}
],
"other_config": {
"base_template_name": "Windows Server 2016 (64-bit)",
"import_task": "OpaqueRef:e43eb71c-45d6-5351-09ff-96e4fb7d0fa5",
"install-methods": "cdrom",
"instant": "true",
"mac_seed": "f83e8d8a-cfdc-b105-b054-ef5cb416b77e"
},
"platform": {
"acpi": "1",
"apic": "true",
"cores-per-socket": "2",
"device_id": "0002",
"hpet": "true",
"nx": "true",
"pae": "true",
"timeoffset": "-25200",
"vga": "std",
"videoram": "8",
"viridian": "true",
"viridian_reference_tsc": "true",
"viridian_time_ref_count": "true"
},
"state": "poweredon",
"uuid": "e3c0b2d5-5f05-424e-479c-d3df8b3e7cda",
"xenstore_data": {
"vm-data": ""
}
}
changes:
description: Detected or made changes to VM
returned: always
type: list
sample: [
{
"hardware": [
"num_cpus"
]
},
{
"disks_changed": [
[],
[
"size"
]
]
},
{
"disks_new": [
{
"name": "new-disk",
"name_desc": "",
"position": 2,
"size_gb": "4",
"vbd_userdevice": "2"
}
]
},
{
"cdrom": [
"type",
"iso_name"
]
},
{
"networks_changed": [
[
"mac"
],
]
},
{
"networks_new": [
{
"name": "Pool-wide network associated with eth2",
"position": 1,
"vif_device": "1"
}
]
},
"need_poweredoff"
]
'''
import re
HAS_XENAPI = False
try:
import XenAPI
HAS_XENAPI = True
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import six
from ansible.module_utils.xenserver import (xenserver_common_argument_spec, XAPI, XenServerObject, get_object_ref,
gather_vm_params, gather_vm_facts, set_vm_power_state, wait_for_vm_ip_address,
is_valid_mac_addr, is_valid_ip_addr, is_valid_ip_netmask, is_valid_ip_prefix,
ip_prefix_to_netmask, ip_netmask_to_prefix,
is_valid_ip6_addr, is_valid_ip6_prefix)
class XenServerVM(XenServerObject):
"""Class for managing XenServer VM.
Attributes:
vm_ref (str): XAPI reference to VM.
vm_params (dict): A dictionary with VM parameters as returned
by gather_vm_params() function.
"""
def __init__(self, module):
"""Inits XenServerVM using module parameters.
Args:
module: Reference to Ansible module object.
"""
super(XenServerVM, self).__init__(module)
self.vm_ref = get_object_ref(self.module, self.module.params['name'], self.module.params['uuid'], obj_type="VM", fail=False, msg_prefix="VM search: ")
self.gather_params()
def exists(self):
"""Returns True if VM exists, else False."""
return self.vm_ref is not None
def gather_params(self):
"""Gathers all VM parameters available in XAPI database."""
self.vm_params = gather_vm_params(self.module, self.vm_ref)
def gather_facts(self):
"""Gathers and returns VM facts."""
return gather_vm_facts(self.module, self.vm_params)
def set_power_state(self, power_state):
"""Controls VM power state."""
state_changed, current_state = set_vm_power_state(self.module, self.vm_ref, power_state, self.module.params['state_change_timeout'])
# If state has changed, update vm_params.
if state_changed:
self.vm_params['power_state'] = current_state.capitalize()
return state_changed
def wait_for_ip_address(self):
"""Waits for VM to acquire an IP address."""
self.vm_params['guest_metrics'] = wait_for_vm_ip_address(self.module, self.vm_ref, self.module.params['state_change_timeout'])
def deploy(self):
"""Deploys new VM from template."""
# Safety check.
if self.exists():
self.module.fail_json(msg="Called deploy on existing VM!")
try:
templ_ref = get_object_ref(self.module, self.module.params['template'], self.module.params['template_uuid'], obj_type="template", fail=True,
msg_prefix="VM deploy: ")
# Is this an existing running VM?
if self.xapi_session.xenapi.VM.get_power_state(templ_ref).lower() != 'halted':
self.module.fail_json(msg="VM deploy: running VM cannot be used as a template!")
# Find a SR we can use for VM.copy(). We use SR of the first disk
# if specified or default SR if not specified.
disk_params_list = self.module.params['disks']
sr_ref = None
if disk_params_list:
disk_params = disk_params_list[0]
disk_sr_uuid = disk_params.get('sr_uuid')
disk_sr = disk_params.get('sr')
if disk_sr_uuid is not None or disk_sr is not None:
sr_ref = get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
msg_prefix="VM deploy disks[0]: ")
if not sr_ref:
if self.default_sr_ref != "OpaqueRef:NULL":
sr_ref = self.default_sr_ref
else:
self.module.fail_json(msg="VM deploy disks[0]: no default SR found! You must specify SR explicitly.")
# VM name could be an empty string which is bad.
if self.module.params['name'] is not None and not self.module.params['name']:
self.module.fail_json(msg="VM deploy: VM name must not be an empty string!")
# Support for Ansible check mode.
if self.module.check_mode:
return
# Now we can instantiate VM. We use VM.clone for linked_clone and
# VM.copy for non linked_clone.
if self.module.params['linked_clone']:
self.vm_ref = self.xapi_session.xenapi.VM.clone(templ_ref, self.module.params['name'])
else:
self.vm_ref = self.xapi_session.xenapi.VM.copy(templ_ref, self.module.params['name'], sr_ref)
# Description is copied over from template so we reset it.
self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, "")
# If template is one of built-in XenServer templates, we have to
# do some additional steps.
# Note: VM.get_is_default_template() is supported from XenServer 7.2
# onward so we use an alternative way.
templ_other_config = self.xapi_session.xenapi.VM.get_other_config(templ_ref)
if "default_template" in templ_other_config and templ_other_config['default_template']:
# other_config of built-in XenServer templates has a key called
# 'disks' with the following content:
# disks: <provision><disk bootable="true" device="0" size="10737418240" sr="" type="system"/></provision>
# This value of other_config is copied to the cloned or copied VM and
# it prevents provisioning of the VM because sr is not specified and
# XAPI returns an error. To get around this, we remove the
# 'disks' key and add disks to VM later ourselves.
vm_other_config = self.xapi_session.xenapi.VM.get_other_config(self.vm_ref)
if "disks" in vm_other_config:
del vm_other_config['disks']
self.xapi_session.xenapi.VM.set_other_config(self.vm_ref, vm_other_config)
# At this point we have VM ready for provisioning.
self.xapi_session.xenapi.VM.provision(self.vm_ref)
# After provisioning we can prepare vm_params for reconfigure().
self.gather_params()
# VM is almost ready. We just need to reconfigure it...
self.reconfigure()
# Power on VM if needed.
if self.module.params['state'] == "poweredon":
self.set_power_state("poweredon")
except XenAPI.Failure as f:
self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
def reconfigure(self):
"""Reconfigures an existing VM.
Returns:
list: parameters that were reconfigured.
"""
# Safety check.
if not self.exists():
self.module.fail_json(msg="Called reconfigure on non existing VM!")
config_changes = self.get_changes()
vm_power_state_save = self.vm_params['power_state'].lower()
if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and not self.module.params['force']:
self.module.fail_json(msg="VM reconfigure: VM has to be in powered off state to reconfigure but force was not specified!")
# Support for Ansible check mode.
if self.module.check_mode:
return config_changes
if "need_poweredoff" in config_changes and vm_power_state_save != 'halted' and self.module.params['force']:
self.set_power_state("shutdownguest")
try:
for change in config_changes:
if isinstance(change, six.string_types):
if change == "name":
self.xapi_session.xenapi.VM.set_name_label(self.vm_ref, self.module.params['name'])
elif change == "name_desc":
self.xapi_session.xenapi.VM.set_name_description(self.vm_ref, self.module.params['name_desc'])
elif change == "folder":
self.xapi_session.xenapi.VM.remove_from_other_config(self.vm_ref, 'folder')
if self.module.params['folder']:
self.xapi_session.xenapi.VM.add_to_other_config(self.vm_ref, 'folder', self.module.params['folder'])
elif change == "home_server":
if self.module.params['home_server']:
host_ref = self.xapi_session.xenapi.host.get_by_name_label(self.module.params['home_server'])[0]
else:
host_ref = "OpaqueRef:NULL"
self.xapi_session.xenapi.VM.set_affinity(self.vm_ref, host_ref)
elif isinstance(change, dict):
if change.get('hardware'):
for hardware_change in change['hardware']:
if hardware_change == "num_cpus":
num_cpus = int(self.module.params['hardware']['num_cpus'])
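# VCPUs_at_startup must not exceed VCPUs_max, so the two values are
# set in different order depending on whether the CPU count is being
# decreased or increased.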
if num_cpus < int(self.vm_params['VCPUs_at_startup']):
self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
else:
self.xapi_session.xenapi.VM.set_VCPUs_max(self.vm_ref, str(num_cpus))
self.xapi_session.xenapi.VM.set_VCPUs_at_startup(self.vm_ref, str(num_cpus))
elif hardware_change == "num_cpu_cores_per_socket":
self.xapi_session.xenapi.VM.remove_from_platform(self.vm_ref, 'cores-per-socket')
num_cpu_cores_per_socket = int(self.module.params['hardware']['num_cpu_cores_per_socket'])
if num_cpu_cores_per_socket > 1:
self.xapi_session.xenapi.VM.add_to_platform(self.vm_ref, 'cores-per-socket', str(num_cpu_cores_per_socket))
elif hardware_change == "memory_mb":
memory_b = str(int(self.module.params['hardware']['memory_mb']) * 1048576)
vm_memory_static_min_b = str(min(int(memory_b), int(self.vm_params['memory_static_min'])))
self.xapi_session.xenapi.VM.set_memory_limits(self.vm_ref, vm_memory_static_min_b, memory_b, memory_b, memory_b)
elif change.get('disks_changed'):
vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
position = 0
for disk_change_list in change['disks_changed']:
for disk_change in disk_change_list:
vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params_list[position]['VDI']['uuid'])
if disk_change == "name":
self.xapi_session.xenapi.VDI.set_name_label(vdi_ref, self.module.params['disks'][position]['name'])
elif disk_change == "name_desc":
self.xapi_session.xenapi.VDI.set_name_description(vdi_ref, self.module.params['disks'][position]['name_desc'])
elif disk_change == "size":
self.xapi_session.xenapi.VDI.resize(vdi_ref, str(self.get_normalized_disk_size(self.module.params['disks'][position],
"VM reconfigure disks[%s]: " % position)))
position += 1
elif change.get('disks_new'):
for position, disk_userdevice in change['disks_new']:
disk_params = self.module.params['disks'][position]
disk_name = disk_params['name'] if disk_params.get('name') else "%s-%s" % (self.vm_params['name_label'], position)
disk_name_desc = disk_params['name_desc'] if disk_params.get('name_desc') else ""
if disk_params.get('sr_uuid'):
sr_ref = self.xapi_session.xenapi.SR.get_by_uuid(disk_params['sr_uuid'])
elif disk_params.get('sr'):
sr_ref = self.xapi_session.xenapi.SR.get_by_name_label(disk_params['sr'])[0]
else:
sr_ref = self.default_sr_ref
disk_size = str(self.get_normalized_disk_size(self.module.params['disks'][position], "VM reconfigure disks[%s]: " % position))
new_disk_vdi = {
"name_label": disk_name,
"name_description": disk_name_desc,
"SR": sr_ref,
"virtual_size": disk_size,
"type": "user",
"sharable": False,
"read_only": False,
"other_config": {},
}
new_disk_vbd = {
"VM": self.vm_ref,
"VDI": None,
"userdevice": disk_userdevice,
"bootable": False,
"mode": "RW",
"type": "Disk",
"empty": False,
"other_config": {},
"qos_algorithm_type": "",
"qos_algorithm_params": {},
}
new_disk_vbd['VDI'] = self.xapi_session.xenapi.VDI.create(new_disk_vdi)
self.xapi_session.xenapi.VBD.create(new_disk_vbd)
elif change.get('cdrom'):
vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
# If there is no CD present, we have to create one.
if not vm_cdrom_params_list:
# We will try to place cdrom at userdevice position
# 3 (which is default) if it is not already occupied
# else we will place it at first allowed position.
cdrom_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
if "3" in cdrom_userdevices_allowed:
cdrom_userdevice = "3"
else:
cdrom_userdevice = cdrom_userdevices_allowed[0]
cdrom_vbd = {
"VM": self.vm_ref,
"VDI": "OpaqueRef:NULL",
"userdevice": cdrom_userdevice,
"bootable": False,
"mode": "RO",
"type": "CD",
"empty": True,
"other_config": {},
"qos_algorithm_type": "",
"qos_algorithm_params": {},
}
cdrom_vbd_ref = self.xapi_session.xenapi.VBD.create(cdrom_vbd)
else:
cdrom_vbd_ref = self.xapi_session.xenapi.VBD.get_by_uuid(vm_cdrom_params_list[0]['uuid'])
cdrom_is_empty = self.xapi_session.xenapi.VBD.get_empty(cdrom_vbd_ref)
for cdrom_change in change['cdrom']:
if cdrom_change == "type":
cdrom_type = self.module.params['cdrom']['type']
if cdrom_type == "none" and not cdrom_is_empty:
self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
elif cdrom_type == "host":
# Unimplemented!
pass
elif cdrom_change == "iso_name":
if not cdrom_is_empty:
self.xapi_session.xenapi.VBD.eject(cdrom_vbd_ref)
cdrom_vdi_ref = self.xapi_session.xenapi.VDI.get_by_name_label(self.module.params['cdrom']['iso_name'])[0]
self.xapi_session.xenapi.VBD.insert(cdrom_vbd_ref, cdrom_vdi_ref)
elif change.get('networks_changed'):
position = 0
for network_change_list in change['networks_changed']:
if network_change_list:
vm_vif_params = self.vm_params['VIFs'][position]
network_params = self.module.params['networks'][position]
vif_ref = self.xapi_session.xenapi.VIF.get_by_uuid(vm_vif_params['uuid'])
network_ref = self.xapi_session.xenapi.network.get_by_uuid(vm_vif_params['network']['uuid'])
vif_recreated = False
if "name" in network_change_list or "mac" in network_change_list:
# To change network or MAC, we destroy old
# VIF and then create a new one with changed
# parameters. That's how XenCenter does it.
# Copy all old parameters to new VIF record.
vif = {
"device": vm_vif_params['device'],
"network": network_ref,
"VM": vm_vif_params['VM'],
"MAC": vm_vif_params['MAC'],
"MTU": vm_vif_params['MTU'],
"other_config": vm_vif_params['other_config'],
"qos_algorithm_type": vm_vif_params['qos_algorithm_type'],
"qos_algorithm_params": vm_vif_params['qos_algorithm_params'],
"locking_mode": vm_vif_params['locking_mode'],
"ipv4_allowed": vm_vif_params['ipv4_allowed'],
"ipv6_allowed": vm_vif_params['ipv6_allowed'],
}
if "name" in network_change_list:
network_ref_new = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
vif['network'] = network_ref_new
vif['MTU'] = self.xapi_session.xenapi.network.get_MTU(network_ref_new)
if "mac" in network_change_list:
vif['MAC'] = network_params['mac'].lower()
if self.vm_params['power_state'].lower() == "running":
self.xapi_session.xenapi.VIF.unplug(vif_ref)
self.xapi_session.xenapi.VIF.destroy(vif_ref)
vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
if self.vm_params['power_state'].lower() == "running":
self.xapi_session.xenapi.VIF.plug(vif_ref_new)
vif_ref = vif_ref_new
vif_recreated = True
if self.vm_params['customization_agent'] == "native":
vif_reconfigure_needed = False
if "type" in network_change_list:
network_type = network_params['type'].capitalize()
vif_reconfigure_needed = True
else:
network_type = vm_vif_params['ipv4_configuration_mode']
if "ip" in network_change_list:
network_ip = network_params['ip']
vif_reconfigure_needed = True
elif vm_vif_params['ipv4_addresses']:
network_ip = vm_vif_params['ipv4_addresses'][0].split('/')[0]
else:
network_ip = ""
if "prefix" in network_change_list:
network_prefix = "/%s" % network_params['prefix']
vif_reconfigure_needed = True
elif vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
network_prefix = "/%s" % vm_vif_params['ipv4_addresses'][0].split('/')[1]
else:
network_prefix = ""
if "gateway" in network_change_list:
network_gateway = network_params['gateway']
vif_reconfigure_needed = True
else:
network_gateway = vm_vif_params['ipv4_gateway']
if vif_recreated or vif_reconfigure_needed:
self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref, network_type,
"%s%s" % (network_ip, network_prefix), network_gateway)
vif_reconfigure_needed = False
if "type6" in network_change_list:
network_type6 = network_params['type6'].capitalize()
vif_reconfigure_needed = True
else:
network_type6 = vm_vif_params['ipv6_configuration_mode']
if "ip6" in network_change_list:
network_ip6 = network_params['ip6']
vif_reconfigure_needed = True
elif vm_vif_params['ipv6_addresses']:
network_ip6 = vm_vif_params['ipv6_addresses'][0].split('/')[0]
else:
network_ip6 = ""
if "prefix6" in network_change_list:
network_prefix6 = "/%s" % network_params['prefix6']
vif_reconfigure_needed = True
elif vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
network_prefix6 = "/%s" % vm_vif_params['ipv6_addresses'][0].split('/')[1]
else:
network_prefix6 = ""
if "gateway6" in network_change_list:
network_gateway6 = network_params['gateway6']
vif_reconfigure_needed = True
else:
network_gateway6 = vm_vif_params['ipv6_gateway']
if vif_recreated or vif_reconfigure_needed:
self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref, network_type6,
"%s%s" % (network_ip6, network_prefix6), network_gateway6)
elif self.vm_params['customization_agent'] == "custom":
vif_device = vm_vif_params['device']
# A user could have manually changed network
# or mac, e.g. through XenCenter, and then also
# made those changes in the playbook manually.
# In that case, the module will not detect any
# changes and info in xenstore_data will
# become stale. For that reason we always
# update name and mac in xenstore_data.
# Since we handle name and mac differently,
# we have to remove them from
# network_change_list.
network_change_list_tmp = [net_chg for net_chg in network_change_list if net_chg not in ['name', 'mac']]
for network_change in network_change_list_tmp + ['name', 'mac']:
self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref,
"vm-data/networks/%s/%s" % (vif_device, network_change))
if network_params.get('name'):
network_name = network_params['name']
else:
network_name = vm_vif_params['network']['name_label']
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/%s" % (vif_device, 'name'), network_name)
if network_params.get('mac'):
network_mac = network_params['mac'].lower()
else:
network_mac = vm_vif_params['MAC'].lower()
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/%s" % (vif_device, 'mac'), network_mac)
for network_change in network_change_list_tmp:
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/%s" % (vif_device, network_change),
network_params[network_change])
position += 1
elif change.get('networks_new'):
for position, vif_device in change['networks_new']:
network_params = self.module.params['networks'][position]
network_ref = self.xapi_session.xenapi.network.get_by_name_label(network_params['name'])[0]
network_name = network_params['name']
network_mac = network_params['mac'] if network_params.get('mac') else ""
network_type = network_params.get('type')
network_ip = network_params['ip'] if network_params.get('ip') else ""
network_prefix = network_params['prefix'] if network_params.get('prefix') else ""
network_netmask = network_params['netmask'] if network_params.get('netmask') else ""
network_gateway = network_params['gateway'] if network_params.get('gateway') else ""
network_type6 = network_params.get('type6')
network_ip6 = network_params['ip6'] if network_params.get('ip6') else ""
network_prefix6 = network_params['prefix6'] if network_params.get('prefix6') else ""
network_gateway6 = network_params['gateway6'] if network_params.get('gateway6') else ""
vif = {
"device": vif_device,
"network": network_ref,
"VM": self.vm_ref,
"MAC": network_mac,
"MTU": self.xapi_session.xenapi.network.get_MTU(network_ref),
"other_config": {},
"qos_algorithm_type": "",
"qos_algorithm_params": {},
}
vif_ref_new = self.xapi_session.xenapi.VIF.create(vif)
if self.vm_params['power_state'].lower() == "running":
self.xapi_session.xenapi.VIF.plug(vif_ref_new)
if self.vm_params['customization_agent'] == "native":
if network_type and network_type == "static":
self.xapi_session.xenapi.VIF.configure_ipv4(vif_ref_new, "Static",
"%s/%s" % (network_ip, network_prefix), network_gateway)
if network_type6 and network_type6 == "static":
self.xapi_session.xenapi.VIF.configure_ipv6(vif_ref_new, "Static",
"%s/%s" % (network_ip6, network_prefix6), network_gateway6)
elif self.vm_params['customization_agent'] == "custom":
# We first have to remove any existing data
# from xenstore_data because there could be
# some old leftover data from some interface
# that once occupied the same device location as
# our new interface.
for network_param in ['name', 'mac', 'type', 'ip', 'prefix', 'netmask', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
self.xapi_session.xenapi.VM.remove_from_xenstore_data(self.vm_ref, "vm-data/networks/%s/%s" % (vif_device, network_param))
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/name" % vif_device, network_name)
# We get MAC from VIF itself instead of
# networks.mac because it could be
# autogenerated.
vm_vif_mac = self.xapi_session.xenapi.VIF.get_MAC(vif_ref_new)
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/mac" % vif_device, vm_vif_mac)
if network_type:
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type" % vif_device, network_type)
if network_type == "static":
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/ip" % vif_device, network_ip)
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/prefix" % vif_device, network_prefix)
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/netmask" % vif_device, network_netmask)
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/gateway" % vif_device, network_gateway)
if network_type6:
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref, "vm-data/networks/%s/type6" % vif_device, network_type6)
if network_type6 == "static":
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/ip6" % vif_device, network_ip6)
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/prefix6" % vif_device, network_prefix6)
self.xapi_session.xenapi.VM.add_to_xenstore_data(self.vm_ref,
"vm-data/networks/%s/gateway6" % vif_device, network_gateway6)
elif change.get('custom_params'):
for position in change['custom_params']:
custom_param_key = self.module.params['custom_params'][position]['key']
custom_param_value = self.module.params['custom_params'][position]['value']
self.xapi_session.xenapi_request("VM.set_%s" % custom_param_key, (self.vm_ref, custom_param_value))
if self.module.params['is_template']:
self.xapi_session.xenapi.VM.set_is_a_template(self.vm_ref, True)
elif "need_poweredoff" in config_changes and self.module.params['force'] and vm_power_state_save != 'halted':
self.set_power_state("poweredon")
# Gather new params after reconfiguration.
self.gather_params()
except XenAPI.Failure as f:
self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
return config_changes
def destroy(self):
"""Removes an existing VM with associated disks"""
# Safety check.
if not self.exists():
self.module.fail_json(msg="Called destroy on non existing VM!")
if self.vm_params['power_state'].lower() != 'halted' and not self.module.params['force']:
self.module.fail_json(msg="VM destroy: VM has to be in powered off state to destroy but force was not specified!")
# Support for Ansible check mode.
if self.module.check_mode:
return
# Make sure that VM is poweredoff before we can destroy it.
self.set_power_state("poweredoff")
try:
# Destroy VM!
self.xapi_session.xenapi.VM.destroy(self.vm_ref)
vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
# Destroy all VDIs associated with VM!
for vm_disk_params in vm_disk_params_list:
vdi_ref = self.xapi_session.xenapi.VDI.get_by_uuid(vm_disk_params['VDI']['uuid'])
self.xapi_session.xenapi.VDI.destroy(vdi_ref)
except XenAPI.Failure as f:
self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
def get_changes(self):
"""Finds VM parameters that differ from specified ones.
This method builds a hierarchical list of VM parameters
that differ from those specified in module parameters.
Returns:
list: VM parameters that differ from those specified in
module parameters.
"""
# Safety check.
if not self.exists():
self.module.fail_json(msg="Called get_changes on non existing VM!")
need_poweredoff = False
if self.module.params['is_template']:
need_poweredoff = True
try:
# This VM could be a template or a snapshot. In that case we fail
# because we can't reconfigure them or it would just be too
# dangerous.
if self.vm_params['is_a_template'] and not self.vm_params['is_a_snapshot']:
self.module.fail_json(msg="VM check: targeted VM is a template! Template reconfiguration is not supported.")
if self.vm_params['is_a_snapshot']:
self.module.fail_json(msg="VM check: targeted VM is a snapshot! Snapshot reconfiguration is not supported.")
# Let's build a list of parameters that changed.
config_changes = []
# Name could only differ if we found an existing VM by uuid.
if self.module.params['name'] is not None and self.module.params['name'] != self.vm_params['name_label']:
if self.module.params['name']:
config_changes.append('name')
else:
self.module.fail_json(msg="VM check name: VM name cannot be an empty string!")
if self.module.params['name_desc'] is not None and self.module.params['name_desc'] != self.vm_params['name_description']:
config_changes.append('name_desc')
# Folder parameter is found in other_config.
vm_other_config = self.vm_params['other_config']
vm_folder = vm_other_config.get('folder', '')
if self.module.params['folder'] is not None and self.module.params['folder'] != vm_folder:
config_changes.append('folder')
if self.module.params['home_server'] is not None:
if (self.module.params['home_server'] and
(not self.vm_params['affinity'] or self.module.params['home_server'] != self.vm_params['affinity']['name_label'])):
# Check existence only. Ignore return value.
get_object_ref(self.module, self.module.params['home_server'], uuid=None, obj_type="home server", fail=True,
msg_prefix="VM check home_server: ")
config_changes.append('home_server')
elif not self.module.params['home_server'] and self.vm_params['affinity']:
config_changes.append('home_server')
config_changes_hardware = []
if self.module.params['hardware']:
num_cpus = self.module.params['hardware'].get('num_cpus')
if num_cpus is not None:
# Kept for compatibility with older Ansible versions that
# do not support subargument specs.
try:
num_cpus = int(num_cpus)
except ValueError as e:
self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be an integer value!")
if num_cpus < 1:
self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be greater than zero!")
# We can use VCPUs_at_startup or VCPUs_max parameter. I'd
# say the former is the way to go but this needs
# confirmation and testing.
if num_cpus != int(self.vm_params['VCPUs_at_startup']):
config_changes_hardware.append('num_cpus')
# For now, we don't support hotplugging so the VM has to be in
# poweredoff state to reconfigure.
need_poweredoff = True
num_cpu_cores_per_socket = self.module.params['hardware'].get('num_cpu_cores_per_socket')
if num_cpu_cores_per_socket is not None:
# Kept for compatibility with older Ansible versions that
# do not support subargument specs.
try:
num_cpu_cores_per_socket = int(num_cpu_cores_per_socket)
except ValueError as e:
self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be an integer value!")
if num_cpu_cores_per_socket < 1:
self.module.fail_json(msg="VM check hardware.num_cpu_cores_per_socket: parameter should be greater than zero!")
if num_cpus and num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="VM check hardware.num_cpus: parameter should be a multiple of hardware.num_cpu_cores_per_socket!")
vm_platform = self.vm_params['platform']
vm_cores_per_socket = int(vm_platform.get('cores-per-socket', 1))
if num_cpu_cores_per_socket != vm_cores_per_socket:
config_changes_hardware.append('num_cpu_cores_per_socket')
# For now, we don't support hotplugging so the VM has to be
# in poweredoff state to reconfigure.
need_poweredoff = True
memory_mb = self.module.params['hardware'].get('memory_mb')
if memory_mb is not None:
# Kept for compatibility with older Ansible versions that
# do not support subargument specs.
try:
memory_mb = int(memory_mb)
except ValueError as e:
self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be an integer value!")
if memory_mb < 1:
self.module.fail_json(msg="VM check hardware.memory_mb: parameter should be greater than zero!")
# There are multiple memory parameters:
# - memory_dynamic_max
# - memory_dynamic_min
# - memory_static_max
# - memory_static_min
# - memory_target
#
# memory_target seems like a good candidate but it returns 0 for
# halted VMs so we can't use it.
#
# I decided to use memory_dynamic_max and memory_static_max
# and use whichever is larger. This strategy needs validation
# and testing.
#
# XenServer stores memory size in bytes so we need to divide
# it by 1024*1024 = 1048576.
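# For example, a VM reporting memory_static_max of 8589934592 bytes
# is compared as 8192 MB against hardware.memory_mb.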
if memory_mb != int(max(int(self.vm_params['memory_dynamic_max']), int(self.vm_params['memory_static_max'])) / 1048576):
config_changes_hardware.append('memory_mb')
# For now, we don't support hotplugging so the VM has to be in
# poweredoff state to reconfigure.
need_poweredoff = True
if config_changes_hardware:
config_changes.append({"hardware": config_changes_hardware})
config_changes_disks = []
config_new_disks = []
# Find allowed userdevices.
vbd_userdevices_allowed = self.xapi_session.xenapi.VM.get_allowed_VBD_devices(self.vm_ref)
if self.module.params['disks']:
# Get the list of all disk. Filter out any CDs found.
vm_disk_params_list = [disk_params for disk_params in self.vm_params['VBDs'] if disk_params['type'] == "Disk"]
# The number of disks defined in module params has to be the same
# as or higher than the number of existing disks attached to the VM.
# We don't support removal or detachment of disks.
if len(self.module.params['disks']) < len(vm_disk_params_list):
self.module.fail_json(msg="VM check disks: provided disks configuration has less disks than the target VM (%d < %d)!" %
(len(self.module.params['disks']), len(vm_disk_params_list)))
# Find the highest disk occupied userdevice.
if not vm_disk_params_list:
vm_disk_userdevice_highest = "-1"
else:
vm_disk_userdevice_highest = vm_disk_params_list[-1]['userdevice']
for position in range(len(self.module.params['disks'])):
if position < len(vm_disk_params_list):
vm_disk_params = vm_disk_params_list[position]
else:
vm_disk_params = None
disk_params = self.module.params['disks'][position]
disk_size = self.get_normalized_disk_size(self.module.params['disks'][position], "VM check disks[%s]: " % position)
disk_name = disk_params.get('name')
if disk_name is not None and not disk_name:
self.module.fail_json(msg="VM check disks[%s]: disk name cannot be an empty string!" % position)
# If this is an existing disk.
if vm_disk_params and vm_disk_params['VDI']:
disk_changes = []
if disk_name and disk_name != vm_disk_params['VDI']['name_label']:
disk_changes.append('name')
disk_name_desc = disk_params.get('name_desc')
if disk_name_desc is not None and disk_name_desc != vm_disk_params['VDI']['name_description']:
disk_changes.append('name_desc')
if disk_size:
if disk_size > int(vm_disk_params['VDI']['virtual_size']):
disk_changes.append('size')
need_poweredoff = True
elif disk_size < int(vm_disk_params['VDI']['virtual_size']):
self.module.fail_json(msg="VM check disks[%s]: disk size is smaller than existing (%d bytes < %s bytes). "
"Reducing disk size is not allowed!" % (position, disk_size, vm_disk_params['VDI']['virtual_size']))
config_changes_disks.append(disk_changes)
# If this is a new disk.
else:
if not disk_size:
self.module.fail_json(msg="VM check disks[%s]: no valid disk size specification found!" % position)
disk_sr_uuid = disk_params.get('sr_uuid')
disk_sr = disk_params.get('sr')
if disk_sr_uuid is not None or disk_sr is not None:
# Check existence only. Ignore return value.
get_object_ref(self.module, disk_sr, disk_sr_uuid, obj_type="SR", fail=True,
msg_prefix="VM check disks[%s]: " % position)
elif self.default_sr_ref == 'OpaqueRef:NULL':
self.module.fail_json(msg="VM check disks[%s]: no default SR found! You must specify SR explicitly." % position)
if not vbd_userdevices_allowed:
self.module.fail_json(msg="VM check disks[%s]: maximum number of devices reached!" % position)
disk_userdevice = None
# We need to place a new disk right above the highest
# placed existing disk to maintain relative disk
# positions pairable with disk specifications in
# module params. That place must not be occupied by
# some other device like CD-ROM.
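# For example, with an existing disk at userdevice 0 and a CD-ROM at
# userdevice 3, the first free userdevice above 0 (here 1) is chosen
# for the new disk.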
for userdevice in vbd_userdevices_allowed:
if int(userdevice) > int(vm_disk_userdevice_highest):
disk_userdevice = userdevice
vbd_userdevices_allowed.remove(userdevice)
vm_disk_userdevice_highest = userdevice
break
# If no place was found.
if disk_userdevice is None:
# Highest occupied place could be a CD-ROM device
# so we have to include all devices regardless of
# type when calculating out-of-bound position.
disk_userdevice = str(int(self.vm_params['VBDs'][-1]['userdevice']) + 1)
self.module.fail_json(msg="VM check disks[%s]: new disk position %s is out of bounds!" % (position, disk_userdevice))
# For new disks we only track their position.
config_new_disks.append((position, disk_userdevice))
# We should append config_changes_disks to config_changes only
# if there is at least one changed disk, else skip.
for disk_change in config_changes_disks:
if disk_change:
config_changes.append({"disks_changed": config_changes_disks})
break
if config_new_disks:
config_changes.append({"disks_new": config_new_disks})
config_changes_cdrom = []
if self.module.params['cdrom']:
# Get the list of all CD-ROMs. Filter out any regular disks
# found. If we found no existing CD-ROM, we will create it
# later else take the first one found.
vm_cdrom_params_list = [cdrom_params for cdrom_params in self.vm_params['VBDs'] if cdrom_params['type'] == "CD"]
# If no existing CD-ROM is found, we will need to add one.
# We need to check if there is any userdevice allowed.
if not vm_cdrom_params_list and not vbd_userdevices_allowed:
self.module.fail_json(msg="VM check cdrom: maximum number of devices reached!")
cdrom_type = self.module.params['cdrom'].get('type')
cdrom_iso_name = self.module.params['cdrom'].get('iso_name')
# If cdrom.iso_name is specified but cdrom.type is not,
# then set cdrom.type to 'iso', unless cdrom.iso_name is
# an empty string, in that case set cdrom.type to 'none'.
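# For example, cdrom: { iso_name: guest-tools.iso } without an
# explicit type is treated as type 'iso'.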
if not cdrom_type:
if cdrom_iso_name:
cdrom_type = "iso"
elif cdrom_iso_name is not None:
cdrom_type = "none"
self.module.params['cdrom']['type'] = cdrom_type
# If type changed.
if cdrom_type and (not vm_cdrom_params_list or cdrom_type != self.get_cdrom_type(vm_cdrom_params_list[0])):
config_changes_cdrom.append('type')
if cdrom_type == "iso":
# Check if ISO exists.
# Check existence only. Ignore return value.
get_object_ref(self.module, cdrom_iso_name, uuid=None, obj_type="ISO image", fail=True,
msg_prefix="VM check cdrom.iso_name: ")
# Is ISO image changed?
if (cdrom_iso_name and
(not vm_cdrom_params_list or
not vm_cdrom_params_list[0]['VDI'] or
cdrom_iso_name != vm_cdrom_params_list[0]['VDI']['name_label'])):
config_changes_cdrom.append('iso_name')
if config_changes_cdrom:
config_changes.append({"cdrom": config_changes_cdrom})
config_changes_networks = []
config_new_networks = []
# Find allowed devices.
vif_devices_allowed = self.xapi_session.xenapi.VM.get_allowed_VIF_devices(self.vm_ref)
if self.module.params['networks']:
# The number of VIFs defined in module params has to be the same
# as or higher than the number of existing VIFs attached to the VM.
# We don't support removal of VIFs.
if len(self.module.params['networks']) < len(self.vm_params['VIFs']):
self.module.fail_json(msg="VM check networks: provided networks configuration has less interfaces than the target VM (%d < %d)!" %
(len(self.module.params['networks']), len(self.vm_params['VIFs'])))
# Find the highest occupied device.
if not self.vm_params['VIFs']:
vif_device_highest = "-1"
else:
vif_device_highest = self.vm_params['VIFs'][-1]['device']
for position in range(len(self.module.params['networks'])):
if position < len(self.vm_params['VIFs']):
vm_vif_params = self.vm_params['VIFs'][position]
else:
vm_vif_params = None
network_params = self.module.params['networks'][position]
network_name = network_params.get('name')
if network_name is not None and not network_name:
self.module.fail_json(msg="VM check networks[%s]: network name cannot be an empty string!" % position)
if network_name:
# Check existence only. Ignore return value.
get_object_ref(self.module, network_name, uuid=None, obj_type="network", fail=True,
msg_prefix="VM check networks[%s]: " % position)
network_mac = network_params.get('mac')
if network_mac is not None:
network_mac = network_mac.lower()
if not is_valid_mac_addr(network_mac):
self.module.fail_json(msg="VM check networks[%s]: specified MAC address '%s' is not valid!" % (position, network_mac))
# IPv4 reconfiguration.
network_type = network_params.get('type')
network_ip = network_params.get('ip')
network_netmask = network_params.get('netmask')
network_prefix = None
# If networks.ip is specified and networks.type is not,
# then set networks.type to 'static'.
if not network_type and network_ip:
network_type = "static"
# XenServer natively supports only 'none' and 'static'
# type with 'none' being the same as 'dhcp'.
if self.vm_params['customization_agent'] == "native" and network_type and network_type == "dhcp":
network_type = "none"
if network_type and network_type == "static":
if network_ip is not None:
network_ip_split = network_ip.split('/')
network_ip = network_ip_split[0]
if network_ip and not is_valid_ip_addr(network_ip):
self.module.fail_json(msg="VM check networks[%s]: specified IPv4 address '%s' is not valid!" % (position, network_ip))
if len(network_ip_split) > 1:
network_prefix = network_ip_split[1]
if not is_valid_ip_prefix(network_prefix):
self.module.fail_json(msg="VM check networks[%s]: specified IPv4 prefix '%s' is not valid!" % (position, network_prefix))
if network_netmask is not None:
if not is_valid_ip_netmask(network_netmask):
self.module.fail_json(msg="VM check networks[%s]: specified IPv4 netmask '%s' is not valid!" % (position, network_netmask))
network_prefix = ip_netmask_to_prefix(network_netmask, skip_check=True)
elif network_prefix is not None:
network_netmask = ip_prefix_to_netmask(network_prefix, skip_check=True)
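# For example, a netmask of 255.255.255.128 corresponds to prefix 25
# and vice versa (the same pair shown in the RETURN sample above).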
# If any parameter is overridden at this point, update it.
if network_type:
network_params['type'] = network_type
if network_ip:
network_params['ip'] = network_ip
if network_netmask:
network_params['netmask'] = network_netmask
if network_prefix:
network_params['prefix'] = network_prefix
network_gateway = network_params.get('gateway')
# Gateway can be an empty string (when removing gateway
# configuration) but if it is not, it should be validated.
if network_gateway and not is_valid_ip_addr(network_gateway):
self.module.fail_json(msg="VM check networks[%s]: specified IPv4 gateway '%s' is not valid!" % (position, network_gateway))
# IPv6 reconfiguration.
network_type6 = network_params.get('type6')
network_ip6 = network_params.get('ip6')
network_prefix6 = None
# If networks.ip6 is specified and networks.type6 is not,
# then set networks.type6 to 'static'.
if not network_type6 and network_ip6:
network_type6 = "static"
# XenServer natively supports only 'none' and 'static'
# type with 'none' being the same as 'dhcp'.
if self.vm_params['customization_agent'] == "native" and network_type6 and network_type6 == "dhcp":
network_type6 = "none"
if network_type6 and network_type6 == "static":
if network_ip6 is not None:
network_ip6_split = network_ip6.split('/')
network_ip6 = network_ip6_split[0]
if network_ip6 and not is_valid_ip6_addr(network_ip6):
self.module.fail_json(msg="VM check networks[%s]: specified IPv6 address '%s' is not valid!" % (position, network_ip6))
if len(network_ip6_split) > 1:
network_prefix6 = network_ip6_split[1]
if not is_valid_ip6_prefix(network_prefix6):
self.module.fail_json(msg="VM check networks[%s]: specified IPv6 prefix '%s' is not valid!" % (position, network_prefix6))
# If any parameter is overridden at this point, update it.
if network_type6:
network_params['type6'] = network_type6
if network_ip6:
network_params['ip6'] = network_ip6
if network_prefix6:
network_params['prefix6'] = network_prefix6
network_gateway6 = network_params.get('gateway6')
# Gateway can be an empty string (when removing gateway
# configuration) but if it is not, it should be validated.
if network_gateway6 and not is_valid_ip6_addr(network_gateway6):
self.module.fail_json(msg="VM check networks[%s]: specified IPv6 gateway '%s' is not valid!" % (position, network_gateway6))
# If this is an existing VIF.
if vm_vif_params and vm_vif_params['network']:
network_changes = []
if network_name and network_name != vm_vif_params['network']['name_label']:
network_changes.append('name')
if network_mac and network_mac != vm_vif_params['MAC'].lower():
network_changes.append('mac')
if self.vm_params['customization_agent'] == "native":
if network_type and network_type != vm_vif_params['ipv4_configuration_mode'].lower():
network_changes.append('type')
if network_type and network_type == "static":
if network_ip and (not vm_vif_params['ipv4_addresses'] or
not vm_vif_params['ipv4_addresses'][0] or
network_ip != vm_vif_params['ipv4_addresses'][0].split('/')[0]):
network_changes.append('ip')
if network_prefix and (not vm_vif_params['ipv4_addresses'] or
not vm_vif_params['ipv4_addresses'][0] or
network_prefix != vm_vif_params['ipv4_addresses'][0].split('/')[1]):
network_changes.append('prefix')
network_changes.append('netmask')
if network_gateway is not None and network_gateway != vm_vif_params['ipv4_gateway']:
network_changes.append('gateway')
if network_type6 and network_type6 != vm_vif_params['ipv6_configuration_mode'].lower():
network_changes.append('type6')
if network_type6 and network_type6 == "static":
if network_ip6 and (not vm_vif_params['ipv6_addresses'] or
not vm_vif_params['ipv6_addresses'][0] or
network_ip6 != vm_vif_params['ipv6_addresses'][0].split('/')[0]):
network_changes.append('ip6')
if network_prefix6 and (not vm_vif_params['ipv6_addresses'] or
not vm_vif_params['ipv6_addresses'][0] or
network_prefix6 != vm_vif_params['ipv6_addresses'][0].split('/')[1]):
network_changes.append('prefix6')
if network_gateway6 is not None and network_gateway6 != vm_vif_params['ipv6_gateway']:
network_changes.append('gateway6')
elif self.vm_params['customization_agent'] == "custom":
vm_xenstore_data = self.vm_params['xenstore_data']
if network_type and network_type != vm_xenstore_data.get('vm-data/networks/%s/type' % vm_vif_params['device'], "none"):
network_changes.append('type')
need_poweredoff = True
if network_type and network_type == "static":
if network_ip and network_ip != vm_xenstore_data.get('vm-data/networks/%s/ip' % vm_vif_params['device'], ""):
network_changes.append('ip')
need_poweredoff = True
if network_prefix and network_prefix != vm_xenstore_data.get('vm-data/networks/%s/prefix' % vm_vif_params['device'], ""):
network_changes.append('prefix')
network_changes.append('netmask')
need_poweredoff = True
if network_gateway is not None and network_gateway != vm_xenstore_data.get('vm-data/networks/%s/gateway' %
vm_vif_params['device'], ""):
network_changes.append('gateway')
need_poweredoff = True
if network_type6 and network_type6 != vm_xenstore_data.get('vm-data/networks/%s/type6' % vm_vif_params['device'], "none"):
network_changes.append('type6')
need_poweredoff = True
if network_type6 and network_type6 == "static":
if network_ip6 and network_ip6 != vm_xenstore_data.get('vm-data/networks/%s/ip6' % vm_vif_params['device'], ""):
network_changes.append('ip6')
need_poweredoff = True
if network_prefix6 and network_prefix6 != vm_xenstore_data.get('vm-data/networks/%s/prefix6' % vm_vif_params['device'], ""):
network_changes.append('prefix6')
need_poweredoff = True
if network_gateway6 is not None and network_gateway6 != vm_xenstore_data.get('vm-data/networks/%s/gateway6' %
vm_vif_params['device'], ""):
network_changes.append('gateway6')
need_poweredoff = True
config_changes_networks.append(network_changes)
# If this is a new VIF.
else:
if not network_name:
self.module.fail_json(msg="VM check networks[%s]: network name is required for new network interface!" % position)
if network_type and network_type == "static" and network_ip and not network_netmask:
self.module.fail_json(msg="VM check networks[%s]: IPv4 netmask or prefix is required for new network interface!" % position)
if network_type6 and network_type6 == "static" and network_ip6 and not network_prefix6:
self.module.fail_json(msg="VM check networks[%s]: IPv6 prefix is required for new network interface!" % position)
# Restart is needed if we are adding new network
# interface with IP/gateway parameters specified
# and custom agent is used.
if self.vm_params['customization_agent'] == "custom":
for parameter in ['type', 'ip', 'prefix', 'gateway', 'type6', 'ip6', 'prefix6', 'gateway6']:
if network_params.get(parameter):
need_poweredoff = True
break
if not vif_devices_allowed:
self.module.fail_json(msg="VM check networks[%s]: maximum number of network interfaces reached!" % position)
# We need to place a new network interface right above the
# highest placed existing interface to maintain relative
# positions pairable with network interface specifications
# in module params.
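# For example, if the VM already has VIFs at devices 0 and 1, the
# first newly added interface is expected at device 2.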
vif_device = str(int(vif_device_highest) + 1)
if vif_device not in vif_devices_allowed:
self.module.fail_json(msg="VM check networks[%s]: new network interface position %s is out of bounds!" % (position, vif_device))
vif_devices_allowed.remove(vif_device)
vif_device_highest = vif_device
# For new VIFs we only track their position.
config_new_networks.append((position, vif_device))
# We should append config_changes_networks to config_changes only
# if there is at least one changed network, else skip.
for network_change in config_changes_networks:
if network_change:
config_changes.append({"networks_changed": config_changes_networks})
break
if config_new_networks:
config_changes.append({"networks_new": config_new_networks})
config_changes_custom_params = []
if self.module.params['custom_params']:
for position in range(len(self.module.params['custom_params'])):
custom_param = self.module.params['custom_params'][position]
custom_param_key = custom_param['key']
custom_param_value = custom_param['value']
if custom_param_key not in self.vm_params:
self.module.fail_json(msg="VM check custom_params[%s]: unknown VM param '%s'!" % (position, custom_param_key))
if custom_param_value != self.vm_params[custom_param_key]:
# We only need to track custom param position.
config_changes_custom_params.append(position)
if config_changes_custom_params:
config_changes.append({"custom_params": config_changes_custom_params})
if need_poweredoff:
config_changes.append('need_poweredoff')
return config_changes
except XenAPI.Failure as f:
self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
def get_normalized_disk_size(self, disk_params, msg_prefix=""):
"""Parses disk size parameters and returns disk size in bytes.
This method tries to parse disk size module parameters. It fails
with an error message if size cannot be parsed.
Args:
disk_params (dict): A dictionary with disk parameters.
msg_prefix (str): A string error messages should be prefixed
with (default: "").
Returns:
int: disk size in bytes if disk size is successfully parsed or
None if no disk size parameters were found.
"""
# There should be only a single size spec but we make a list of all
# size specs just in case. Priority is given to 'size' but if not
# found, we check for 'size_tb', 'size_gb', 'size_mb' etc. and use the
# first one found.
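# For example, disk size given as {'size': '10gb'} or as
# {'size_gb': '10'} normalizes to 10 * 1024**3 = 10737418240 bytes
# either way.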
disk_size_spec = [x for x in disk_params.keys() if disk_params[x] is not None and (x.startswith('size_') or x == 'size')]
if disk_size_spec:
try:
# size
if "size" in disk_size_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)\s*(.*)')
disk_size_m = size_regex.match(disk_params['size'])
if disk_size_m:
size = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
# size_tb, size_gb, size_mb, size_kb, size_b
else:
size = disk_params[disk_size_spec[0]]
unit = disk_size_spec[0].split('_')[-1]
if not unit:
unit = "b"
else:
unit = unit.lower()
if re.match(r'\d+\.\d+', size):
# We found float value in string, let's typecast it.
if unit == "b":
# If we found float but unit is bytes, we get the integer part only.
size = int(float(size))
else:
size = float(size)
else:
# We found int value in string, let's typecast it.
size = int(size)
if not size or size < 0:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="%sfailed to parse disk size! Please review value provided using documentation." % msg_prefix)
disk_units = dict(tb=4, gb=3, mb=2, kb=1, b=0)
if unit in disk_units:
return int(size * (1024 ** disk_units[unit]))
else:
self.module.fail_json(msg="%s'%s' is not a supported unit for disk size! Supported units are ['%s']." %
(msg_prefix, unit, "', '".join(sorted(disk_units.keys(), key=lambda key: disk_units[key]))))
else:
return None
@staticmethod
def get_cdrom_type(vm_cdrom_params):
"""Returns VM CD-ROM type."""
# TODO: implement support for detecting type host. No server to test
# this on at the moment.
if vm_cdrom_params['empty']:
return "none"
else:
return "iso"
def main():
argument_spec = xenserver_common_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['present', 'absent', 'poweredon']),
name=dict(type='str', aliases=['name_label']),
name_desc=dict(type='str'),
uuid=dict(type='str'),
template=dict(type='str', aliases=['template_src']),
template_uuid=dict(type='str'),
is_template=dict(type='bool', default=False),
folder=dict(type='str'),
hardware=dict(
type='dict',
options=dict(
num_cpus=dict(type='int'),
num_cpu_cores_per_socket=dict(type='int'),
memory_mb=dict(type='int'),
),
),
disks=dict(
type='list',
elements='dict',
options=dict(
size=dict(type='str'),
size_tb=dict(type='str'),
size_gb=dict(type='str'),
size_mb=dict(type='str'),
size_kb=dict(type='str'),
size_b=dict(type='str'),
name=dict(type='str', aliases=['name_label']),
name_desc=dict(type='str'),
sr=dict(type='str'),
sr_uuid=dict(type='str'),
),
aliases=['disk'],
mutually_exclusive=[
['size', 'size_tb', 'size_gb', 'size_mb', 'size_kb', 'size_b'],
['sr', 'sr_uuid'],
],
),
cdrom=dict(
type='dict',
options=dict(
type=dict(type='str', choices=['none', 'iso']),
iso_name=dict(type='str'),
),
required_if=[
['type', 'iso', ['iso_name']],
],
),
networks=dict(
type='list',
elements='dict',
options=dict(
name=dict(type='str', aliases=['name_label']),
mac=dict(type='str'),
type=dict(type='str', choices=['none', 'dhcp', 'static']),
ip=dict(type='str'),
netmask=dict(type='str'),
gateway=dict(type='str'),
type6=dict(type='str', choices=['none', 'dhcp', 'static']),
ip6=dict(type='str'),
gateway6=dict(type='str'),
),
aliases=['network'],
required_if=[
['type', 'static', ['ip']],
['type6', 'static', ['ip6']],
],
),
home_server=dict(type='str'),
custom_params=dict(
type='list',
elements='dict',
options=dict(
key=dict(type='str', required=True),
value=dict(type='raw', required=True),
),
),
wait_for_ip_address=dict(type='bool', default=False),
state_change_timeout=dict(type='int', default=0),
linked_clone=dict(type='bool', default=False),
force=dict(type='bool', default=False),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[
['name', 'uuid'],
],
mutually_exclusive=[
['template', 'template_uuid'],
],
)
result = {'failed': False, 'changed': False}
vm = XenServerVM(module)
# Find existing VM
if vm.exists():
if module.params['state'] == "absent":
vm.destroy()
result['changed'] = True
elif module.params['state'] == "present":
config_changes = vm.reconfigure()
if config_changes:
result['changed'] = True
# Make new disk and network changes more user friendly
# and informative.
for change in config_changes:
if isinstance(change, dict):
if change.get('disks_new'):
disks_new = []
for position, userdevice in change['disks_new']:
disk_new_params = {"position": position, "vbd_userdevice": userdevice}
disk_params = module.params['disks'][position]
for k in disk_params.keys():
if disk_params[k] is not None:
disk_new_params[k] = disk_params[k]
disks_new.append(disk_new_params)
if disks_new:
change['disks_new'] = disks_new
elif change.get('networks_new'):
networks_new = []
for position, device in change['networks_new']:
network_new_params = {"position": position, "vif_device": device}
network_params = module.params['networks'][position]
for k in network_params.keys():
if network_params[k] is not None:
network_new_params[k] = network_params[k]
networks_new.append(network_new_params)
if networks_new:
change['networks_new'] = networks_new
result['changes'] = config_changes
elif module.params['state'] in ["poweredon", "poweredoff", "restarted", "shutdownguest", "rebootguest", "suspended"]:
result['changed'] = vm.set_power_state(module.params['state'])
elif module.params['state'] != "absent":
vm.deploy()
result['changed'] = True
if module.params['wait_for_ip_address'] and module.params['state'] != "absent":
vm.wait_for_ip_address()
result['instance'] = vm.gather_facts()
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
clarkperkins/stackdio | stackdio/api/cloud/utils.py | 2 | 2418 | # -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import importlib
import logging
import re
from django.conf import settings
from stackdio.core.config import StackdioConfigException
logger = logging.getLogger(__name__)
def get_provider_driver_class(provider):
provider_classes = get_cloud_providers()
for provider_class in provider_classes:
if provider_class.SHORT_NAME == provider.name:
return provider_class
return None
def check_cloud_provider_settings():
if not hasattr(settings, 'CLOUD_PROVIDERS'):
raise StackdioConfigException(
'settings.CLOUD_PROVIDERS must set with a list of supported cloud providers.'
)
def get_cloud_provider_choices():
check_cloud_provider_settings()
choices = []
for provider in get_cloud_providers():
choices.append(provider.get_provider_choice())
return choices
def get_cloud_providers():
check_cloud_provider_settings()
providers = []
for class_path in settings.CLOUD_PROVIDERS:
try:
module_path, class_name = class_path.rsplit('.', 1)
module = importlib.import_module(module_path)
providers.append(getattr(module, class_name))
except ImportError as e:
msg = 'Could not import {0} from settings.CLOUD_PROVIDERS'.format(class_path)
logger.error(e)
raise StackdioConfigException(msg)
return providers
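# Illustrative configuration only (the provider class path is hypothetical):
# settings.CLOUD_PROVIDERS is expected to be a list of dotted class paths,
# e.g.
#   CLOUD_PROVIDERS = [
#       'stackdio.api.cloud.providers.aws.AWSCloudProvider',
#   ]
# get_cloud_providers() imports each path with importlib and returns the
# resolved classes, raising StackdioConfigException on a bad path.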
def find_roles(filename, pattern):
with open(filename) as f:
recording = False
for line in f:
# if line.startswith(pattern):
# re.match('^(\s)+-\s(?!match\:)', line)
if re.match(pattern, line):
yield line
recording = not recording
elif recording:
yield line
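# A usage sketch for find_roles (file name and pattern are hypothetical):
#   for line in find_roles('/srv/salt/top.sls', r'^\s+-\s'):
#       print(line.rstrip())
# Each matching line is yielded and toggles the 'recording' flag; lines that
# do not match are yielded only while recording is on.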
| apache-2.0 |
CiscoSystems/nova | nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py | 31 | 4912 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import extended_server_attributes
from nova import compute
from nova import db
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from oslo.config import cfg
NAME_FMT = cfg.CONF.instance_name_template
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj.Instance._from_db_object(
args[1], instance_obj.Instance(),
fakes.stub_instance(1, uuid=UUID3, host="host-fake",
node="node-fake"), fields)
def fake_compute_get_all(*args, **kwargs):
db_list = [
fakes.stub_instance(1, uuid=UUID1, host="host-1", node="node-1"),
fakes.stub_instance(2, uuid=UUID2, host="host-2", node="node-2")
]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
instance_obj.InstanceList(),
db_list, fields)
class ExtendedServerAttributesTest(test.TestCase):
content_type = 'application/json'
prefix = 'OS-EXT-SRV-ATTR:'
def setUp(self):
super(ExtendedServerAttributesTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Extended_server_attributes'])
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
return res
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def assertServerAttributes(self, server, host, node, instance_name):
self.assertEqual(server.get('%shost' % self.prefix), host)
self.assertEqual(server.get('%sinstance_name' % self.prefix),
instance_name)
self.assertEqual(server.get('%shypervisor_hostname' % self.prefix),
node)
def test_show(self):
url = '/v2/fake/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertServerAttributes(self._get_server(res.body),
host='host-fake',
node='node-fake',
instance_name=NAME_FMT % 1)
def test_detail(self):
url = '/v2/fake/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
self.assertServerAttributes(server,
host='host-%s' % (i + 1),
node='node-%s' % (i + 1),
instance_name=NAME_FMT % (i + 1))
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTest):
content_type = 'application/xml'
ext = extended_server_attributes
prefix = '{%s}' % ext.Extended_server_attributes.namespace
def _get_server(self, body):
return etree.XML(body)
def _get_servers(self, body):
return etree.XML(body).getchildren()
| apache-2.0 |
ngageoint/geoq | geoq/proxy/tests.py | 1 | 6015 | from django.test import TestCase,Client
from httmock import urlmatch, response, HTTMock
import os
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from .models import *
def register_valid_proxy(name,url,refresh=100):
p = SourceDocument.objects.create(Name=name,SourceURL=url,Refresh=refresh)
p.save()
p.refresh(force=True)
class MyMock:
""" uses HTTMock but adds a state variable so I can check which calls got made and how many """
def __init__(self):
self.state = []
@urlmatch(netloc=r'(.*\.)?validkmz\.com$')
def validkmz_mock(self,url, request):
self.state.append("downloaded"+str(url))
return open(os.path.join("proxy","testdata","mykmz.kmz")).read()
@urlmatch(netloc=r'(.*\.)?boguskmz\.com$')
def boguskmz_mock(self,url, request):
self.state.append("failed to download"+str(url))
return response(404)
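# Illustrative use of MyMock (mirrors the tests below): requests issued while
# the HTTMock context is active are answered by the mock handlers and
# recorded in mock.state, e.g.
#   mock = MyMock()
#   with HTTMock(mock.validkmz_mock):
#       register_valid_proxy("bob", url="http://validkmz.com/data/some.kmz")
#   assert len(mock.state) == 1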
class Duplicates(TestCase):
""" placeholder for needing to test trying to register kmz with the same name or two kmz with the same child names or a kmz with two dupe children """
pass
class RegisterTests(TestCase):
""" As a user, I want to be able to access proxies but I can't configure/edit them without having appropiate permissions """
def setUp(self):
""" create a test user and log them in, setup new mock object"""
self.user = User.objects.create_user("bob",password="bob")
self.user.save()
self.c = Client()
self.c.login(username="bob",password="bob")
self.myMock = MyMock()
def test_permissions(self):
""" check that an anoymous user can access proxies but can't register new ones"""
with HTTMock(self.myMock.validkmz_mock):
self.c.logout()
r = self.c.get("/proxy/")
self.assertEqual(200, r.status_code)
register_valid_proxy("bob",url="http://validkmz.com/data/some.kmz",refresh=100) #this should be long enough that we don't refresh from registration
r = self.c.get("/proxy/")
self.assertEqual(200, r.status_code)
self.assertContains(r,"bob")
r = self.c.get("/proxy/kmz/bob/")
self.assertEqual(200, r.status_code)
r = self.c.get("/proxy/kmz/notbob/")
self.assertEqual(404, r.status_code)
r = self.c.post("/proxy/register/",{"Name":"bob2","SourceURL":"http://validkmz.com/data/someother.kmz","Type":"kmz"})
self.assertEqual(302, r.status_code) #redirects to login (or would try to ... )
newloc = r._headers.get('location',("","fail"))[1]
self.assertNotEqual(-1,newloc.find("login"),"Should have redirected user to login")
self.assertEqual(1,len(self.myMock.state),"should have only had one call go out")
self.assertTrue(self.myMock.state[0].find("downloaded") != -1)
def test_valid_registration(self):
""" test that a valid user can register a new kmz file"""
with HTTMock(self.myMock.validkmz_mock):
r = self.c.post("/proxy/register/",{"Name":"bob2","SourceURL":"http://validkmz.com/data/someother.kmz","Type":"kmz"})
self.assertEqual(302, r.status_code)
r = self.c.get("/proxy/")
self.assertEqual(200, r.status_code)
self.assertContains(r,"bob2")
r = self.c.get("/proxy/kmz/bob2/")
self.assertEqual(200, r.status_code)
def test_invalid_registration(self):
""" allow the user to register a non-working KMZ file but warn them (and return dummy kml """
with HTTMock(self.myMock.boguskmz_mock):
r = self.c.post("/proxy/register/",{"Name":"badbob","SourceURL":"http://boguskmz.com/data/someother.kmz","Type":"kmz"})
self.assertEqual(302, r.status_code)
r = self.c.get("/proxy/kmz/badbob/")
self.assertContains(r,"Warning")
r = self.c.get("/proxy/")
self.assertEqual(200, r.status_code)
self.assertContains(r,"badbob")
r = self.c.get("/proxy/kmz/badbob/")
self.assertEqual(200, r.status_code)
self.assertContains(r,"Warning: KMZ file is currently unavailable")
class CacheTests(TestCase):
def setUp(self):
""" create a kmz file registration """
self.myMock = MyMock()
self.user = User.objects.create_user("bob",password="bob")
self.user.save()
self.c = Client()
self.c.login(username="bob",password="bob")
with HTTMock(self.myMock.validkmz_mock):
register_valid_proxy("proxytest",url="http://validkmz.com/data/some.kmz",refresh=3)
def makeRequest(self,n="proxytest"):
with HTTMock(self.myMock.validkmz_mock):
r = self.c.get("/proxy/kmz/"+n+"/")
self.assertEqual(200, r.status_code)
#todo: introspection
for img in [slugify("files/neko.png"),slugify("files/icon56.png")]:
r = self.c.get("/proxy/image/%s/%s/"%(n,img))
self.assertEqual(200, r.status_code)
r = self.c.get("/proxy/image/%s/boguspng/"%n)
self.assertEqual(404, r.status_code)
def stestFirstRequest(self):
""" test that the first request after registration works (assumes right after registration """
self.makeRequest("proxytest")
self.assertEqual(1,len(self.myMock.state),"should have only had one call go out")
def testLaterRequest(self):
""" test that a subsequent request triggers a refresh """
import time
time.sleep(5) # ugh...
self.makeRequest("proxytest")
        self.assertEqual(2,len(self.myMock.state),"should have had two calls go out (initial download plus refresh)")
class ConncurrentTests(TestCase):
def setUp(self):
pass
def testDualUpdates(self):
print("Do concurrent tests once we figure out how to do so")
#self.assertEqual("do I know how to test this","yes")
| mit |
bokeh/bokeh | tests/integration/widgets/test_toggle.py | 1 | 4531 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from flaky import flaky
# Bokeh imports
from bokeh._testing.util.selenium import RECORD
from bokeh.core.enums import ButtonType
from bokeh.layouts import column
from bokeh.models import (
Circle,
ColumnDataSource,
CustomAction,
CustomJS,
Plot,
Range1d,
Toggle,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
)
@pytest.mark.selenium
class Test_Toggle:
def test_displays_label(self, bokeh_model_page) -> None:
button = Toggle(label="label", css_classes=["foo"])
page = bokeh_model_page(button)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
assert button.text == "label"
@pytest.mark.parametrize('typ', list(ButtonType))
def test_displays_button_type(self, typ, bokeh_model_page) -> None:
button = Toggle(button_type=typ, css_classes=["foo"])
page = bokeh_model_page(button)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
assert typ in button.get_attribute('class')
@flaky(max_runs=10)
def test_server_on_click_round_trip(self, bokeh_server_page) -> None:
def modify_doc(doc):
source = ColumnDataSource(dict(x=[1, 2], y=[1, 1]))
plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
plot.add_glyph(source, Circle(x='x', y='y', size=20))
plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
button = Toggle(css_classes=['foo'])
def cb(value):
if value:
source.data=dict(x=[10, 20], y=[10, 10])
else:
source.data=dict(x=[100, 200], y=[100, 100])
button.on_click(cb)
doc.add_root(column(button, plot))
page = bokeh_server_page(modify_doc)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
page.click_custom_action()
results = page.results
assert results == {'data': {'x': [10, 20], 'y': [10, 10]}}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
page.click_custom_action()
results = page.results
assert results == {'data': {'x': [100, 200], 'y': [100, 100]}}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
page.click_custom_action()
results = page.results
assert results == {'data': {'x': [10, 20], 'y': [10, 10]}}
# XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
#assert page.has_no_console_errors()
# XXX (bev) Toggle does not register to process ButtonClick events
def test_js_on_click_executes(self, bokeh_model_page) -> None:
button = Toggle(css_classes=['foo'])
button.js_on_click(CustomJS(code=RECORD("value", "cb_obj.active")))
page = bokeh_model_page(button)
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
results = page.results
assert results == {'value': True}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
results = page.results
assert results == {'value': False}
button = page.driver.find_element_by_css_selector('.foo .bk-btn')
button.click()
results = page.results
assert results == {'value': True}
assert page.has_no_console_errors()
| bsd-3-clause |
CapOM/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/s3/test_bucket.py | 88 | 12516 | # -*- coding: utf-8 -*-
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 Bucket
"""
from mock import patch, Mock
import unittest
import time
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from boto.s3.bucketlogging import BucketLogging
from boto.s3.lifecycle import Lifecycle
from boto.s3.lifecycle import Transition
from boto.s3.lifecycle import Expiration
from boto.s3.lifecycle import Rule
from boto.s3.acl import Grant
from boto.s3.tagging import Tags, TagSet
from boto.s3.website import RedirectLocation
from boto.compat import urllib
class S3BucketTest (unittest.TestCase):
s3 = True
def setUp(self):
self.conn = S3Connection()
self.bucket_name = 'bucket-%d' % int(time.time())
self.bucket = self.conn.create_bucket(self.bucket_name)
def tearDown(self):
for key in self.bucket:
key.delete()
self.bucket.delete()
def test_next_marker(self):
expected = ["a/", "b", "c"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# Normal list of first 2 keys will have
# no NextMarker set, so we use last key to iterate
# last element will be "b" so no issue.
rs = self.bucket.get_all_keys(max_keys=2)
for element in rs:
pass
self.assertEqual(element.name, "b")
self.assertEqual(rs.next_marker, None)
# list using delimiter of first 2 keys will have
# a NextMarker set (when truncated). As prefixes
# are grouped together at the end, we get "a/" as
# last element, but luckily we have next_marker.
rs = self.bucket.get_all_keys(max_keys=2, delimiter="/")
for element in rs:
pass
self.assertEqual(element.name, "a/")
self.assertEqual(rs.next_marker, "b")
# ensure bucket.list() still works by just
# popping elements off the front of expected.
rs = self.bucket.list()
for element in rs:
self.assertEqual(element.name, expected.pop(0))
self.assertEqual(expected, [])
def test_list_with_url_encoding(self):
expected = ["α", "β", "γ"]
for key_name in expected:
key = self.bucket.new_key(key_name)
key.set_contents_from_string(key_name)
# ensure bucket.list() still works by just
# popping elements off the front of expected.
orig_getall = self.bucket._get_all
getall = lambda *a, **k: orig_getall(*a, max_keys=2, **k)
with patch.object(self.bucket, '_get_all', getall):
rs = self.bucket.list(encoding_type="url")
for element in rs:
name = urllib.parse.unquote(element.name.encode('utf-8'))
self.assertEqual(name, expected.pop(0))
self.assertEqual(expected, [])
def test_logging(self):
# use self.bucket as the target bucket so that teardown
# will delete any log files that make it into the bucket
# automatically and all we have to do is delete the
# source bucket.
sb_name = "src-" + self.bucket_name
sb = self.conn.create_bucket(sb_name)
# grant log write perms to target bucket using canned-acl
self.bucket.set_acl("log-delivery-write")
target_bucket = self.bucket_name
target_prefix = u"jp/ログ/"
# Check existing status is disabled
bls = sb.get_logging_status()
self.assertEqual(bls.target, None)
# Create a logging status and grant auth users READ PERM
authuri = "http://acs.amazonaws.com/groups/global/AuthenticatedUsers"
authr = Grant(permission="READ", type="Group", uri=authuri)
sb.enable_logging(target_bucket, target_prefix=target_prefix, grants=[authr])
# Check the status and confirm its set.
bls = sb.get_logging_status()
self.assertEqual(bls.target, target_bucket)
self.assertEqual(bls.prefix, target_prefix)
self.assertEqual(len(bls.grants), 1)
self.assertEqual(bls.grants[0].type, "Group")
self.assertEqual(bls.grants[0].uri, authuri)
# finally delete the src bucket
sb.delete()
def test_tagging(self):
tagging = """
<Tagging>
<TagSet>
<Tag>
<Key>tagkey</Key>
<Value>tagvalue</Value>
</Tag>
</TagSet>
</Tagging>
"""
self.bucket.set_xml_tags(tagging)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'tagkey')
self.assertEqual(response[0][0].value, 'tagvalue')
self.bucket.delete_tags()
try:
self.bucket.get_tags()
except S3ResponseError as e:
self.assertEqual(e.code, 'NoSuchTagSet')
except Exception as e:
self.fail("Wrong exception raised (expected S3ResponseError): %s"
% e)
else:
self.fail("Expected S3ResponseError, but no exception raised.")
def test_tagging_from_objects(self):
"""Create tags from python objects rather than raw xml."""
t = Tags()
tag_set = TagSet()
tag_set.add_tag('akey', 'avalue')
tag_set.add_tag('anotherkey', 'anothervalue')
t.add_tag_set(tag_set)
self.bucket.set_tags(t)
response = self.bucket.get_tags()
self.assertEqual(response[0][0].key, 'akey')
self.assertEqual(response[0][0].value, 'avalue')
self.assertEqual(response[0][1].key, 'anotherkey')
self.assertEqual(response[0][1].value, 'anothervalue')
def test_website_configuration(self):
response = self.bucket.configure_website('index.html')
self.assertTrue(response)
config = self.bucket.get_website_configuration()
self.assertEqual(config, {'WebsiteConfiguration':
{'IndexDocument': {'Suffix': 'index.html'}}})
config2, xml = self.bucket.get_website_configuration_with_xml()
self.assertEqual(config, config2)
self.assertTrue('<Suffix>index.html</Suffix>' in xml, xml)
def test_website_redirect_all_requests(self):
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {
'RedirectAllRequestsTo': {
'HostName': 'example.com'}}})
# Can configure the protocol as well.
response = self.bucket.configure_website(
redirect_all_requests_to=RedirectLocation('example.com', 'https'))
config = self.bucket.get_website_configuration()
self.assertEqual(config, {
'WebsiteConfiguration': {'RedirectAllRequestsTo': {
'HostName': 'example.com',
'Protocol': 'https',
}}}
)
def test_lifecycle(self):
lifecycle = Lifecycle()
lifecycle.add_rule('myid', '', 'Enabled', 30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertEqual(actual_lifecycle.id, 'myid')
self.assertEqual(actual_lifecycle.prefix, '')
self.assertEqual(actual_lifecycle.status, 'Enabled')
self.assertEqual(actual_lifecycle.transition, None)
def test_lifecycle_with_glacier_transition(self):
lifecycle = Lifecycle()
transition = Transition(days=30, storage_class='GLACIER')
rule = Rule('myid', prefix='', status='Enabled', expiration=None,
transition=transition)
lifecycle.append(rule)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
transition = response[0].transition
self.assertEqual(transition.days, 30)
self.assertEqual(transition.storage_class, 'GLACIER')
self.assertEqual(transition.date, None)
def test_lifecycle_multi(self):
date = '2022-10-12T00:00:00.000Z'
sc = 'GLACIER'
lifecycle = Lifecycle()
lifecycle.add_rule("1", "1/", "Enabled", 1)
lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2))
lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date))
lifecycle.add_rule("4", "4/", "Enabled", None,
Transition(days=4, storage_class=sc))
lifecycle.add_rule("5", "5/", "Enabled", None,
Transition(date=date, storage_class=sc))
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
        readlifecycle = self.bucket.get_lifecycle_config()
for rule in readlifecycle:
if rule.id == "1":
self.assertEqual(rule.prefix, "1/")
self.assertEqual(rule.expiration.days, 1)
elif rule.id == "2":
self.assertEqual(rule.prefix, "2/")
self.assertEqual(rule.expiration.days, 2)
elif rule.id == "3":
self.assertEqual(rule.prefix, "3/")
self.assertEqual(rule.expiration.date, date)
elif rule.id == "4":
self.assertEqual(rule.prefix, "4/")
self.assertEqual(rule.transition.days, 4)
self.assertEqual(rule.transition.storage_class, sc)
elif rule.id == "5":
self.assertEqual(rule.prefix, "5/")
self.assertEqual(rule.transition.date, date)
self.assertEqual(rule.transition.storage_class, sc)
else:
self.fail("unexpected id %s" % rule.id)
def test_lifecycle_jp(self):
# test lifecycle with Japanese prefix
name = "Japanese files"
prefix = "日本語/"
days = 30
lifecycle = Lifecycle()
lifecycle.add_rule(name, prefix, "Enabled", days)
# set the lifecycle
self.bucket.configure_lifecycle(lifecycle)
# read the lifecycle back
        readlifecycle = self.bucket.get_lifecycle_config()
for rule in readlifecycle:
self.assertEqual(rule.id, name)
self.assertEqual(rule.expiration.days, days)
#Note: Boto seems correct? AWS seems broken?
#self.assertEqual(rule.prefix, prefix)
def test_lifecycle_with_defaults(self):
lifecycle = Lifecycle()
lifecycle.add_rule(expiration=30)
self.assertTrue(self.bucket.configure_lifecycle(lifecycle))
response = self.bucket.get_lifecycle_config()
self.assertEqual(len(response), 1)
actual_lifecycle = response[0]
self.assertNotEqual(len(actual_lifecycle.id), 0)
self.assertEqual(actual_lifecycle.prefix, '')
def test_lifecycle_rule_xml(self):
# create a rule directly with id, prefix defaults
rule = Rule(status='Enabled', expiration=30)
s = rule.to_xml()
# Confirm no ID is set in the rule.
self.assertEqual(s.find("<ID>"), -1)
# Confirm Prefix is '' and not set to 'None'
self.assertNotEqual(s.find("<Prefix></Prefix>"), -1)
| bsd-3-clause |
Ballz0fSteel/Umeko | lib/youtube_dl/extractor/ted.py | 16 | 11976 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
try_get,
)
class TEDIE(InfoExtractor):
IE_NAME = 'ted'
_VALID_URL = r'''(?x)
(?P<proto>https?://)
(?P<type>www|embed(?:-ssl)?)(?P<urlmain>\.ted\.com/
(
(?P<type_playlist>playlists(?:/\d+)?) # We have a playlist
|
((?P<type_talk>talks)) # We have a simple talk
|
(?P<type_watch>watch)/[^/]+/[^/]+
)
(/lang/(.*?))? # The url may contain the language
/(?P<name>[\w-]+) # Here goes the name and then ".html"
.*)$
'''
_TESTS = [{
'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html',
'md5': '0de43ac406aa3e4ea74b66c9c7789b13',
'info_dict': {
'id': '102',
'ext': 'mp4',
'title': 'The illusion of consciousness',
'description': ('Philosopher Dan Dennett makes a compelling '
'argument that not only don\'t we understand our own '
'consciousness, but that half the time our brains are '
'actively fooling us.'),
'uploader': 'Dan Dennett',
'width': 853,
'duration': 1308,
}
}, {
'url': 'http://www.ted.com/watch/ted-institute/ted-bcg/vishal-sikka-the-beauty-and-power-of-algorithms',
'md5': 'b899ac15e345fb39534d913f7606082b',
'info_dict': {
'id': 'tSVI8ta_P4w',
'ext': 'mp4',
'title': 'Vishal Sikka: The beauty and power of algorithms',
'thumbnail': r're:^https?://.+\.jpg',
'description': 'md5:6261fdfe3e02f4f579cbbfc00aff73f4',
'upload_date': '20140122',
'uploader_id': 'TEDInstitute',
'uploader': 'TED Institute',
},
'add_ie': ['Youtube'],
}, {
'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best',
'md5': '71b3ab2f4233012dce09d515c9c39ce2',
'info_dict': {
'id': '1972',
'ext': 'mp4',
'title': 'Be passionate. Be courageous. Be your best.',
'uploader': 'Gabby Giffords and Mark Kelly',
'description': 'md5:5174aed4d0f16021b704120360f72b92',
'duration': 1128,
},
}, {
'url': 'http://www.ted.com/playlists/who_are_the_hackers',
'info_dict': {
'id': '10',
'title': 'Who are the hackers?',
},
'playlist_mincount': 6,
}, {
# contains a youtube video
'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything',
'add_ie': ['Youtube'],
'info_dict': {
'id': '_ZG8HBuDjgc',
'ext': 'webm',
'title': 'Douglas Adams: Parrots the Universe and Everything',
'description': 'md5:01ad1e199c49ac640cb1196c0e9016af',
'uploader': 'University of California Television (UCTV)',
'uploader_id': 'UCtelevision',
'upload_date': '20080522',
},
'params': {
'skip_download': True,
},
}, {
# YouTube video
'url': 'http://www.ted.com/talks/jeffrey_kluger_the_sibling_bond',
'add_ie': ['Youtube'],
'info_dict': {
'id': 'aFBIPO-P7LM',
'ext': 'mp4',
'title': 'The hidden power of siblings: Jeff Kluger at TEDxAsheville',
'description': 'md5:3d7a4f50d95ca5dd67104e2a20f43fe1',
'uploader': 'TEDx Talks',
'uploader_id': 'TEDxTalks',
'upload_date': '20111216',
},
'params': {
'skip_download': True,
},
}]
_NATIVE_FORMATS = {
'low': {'width': 320, 'height': 180},
'medium': {'width': 512, 'height': 288},
'high': {'width': 854, 'height': 480},
}
def _extract_info(self, webpage):
info_json = self._search_regex(
r'(?s)q\(\s*"\w+.init"\s*,\s*({.+})\)\s*</script>',
webpage, 'info json')
return json.loads(info_json)
def _real_extract(self, url):
m = re.match(self._VALID_URL, url, re.VERBOSE)
if m.group('type').startswith('embed'):
desktop_url = m.group('proto') + 'www' + m.group('urlmain')
return self.url_result(desktop_url, 'TED')
name = m.group('name')
if m.group('type_talk'):
return self._talk_info(url, name)
elif m.group('type_watch'):
return self._watch_info(url, name)
else:
return self._playlist_videos_info(url, name)
def _playlist_videos_info(self, url, name):
'''Returns the videos of the playlist'''
webpage = self._download_webpage(url, name,
'Downloading playlist webpage')
info = self._extract_info(webpage)
playlist_info = try_get(
info, lambda x: x['__INITIAL_DATA__']['playlist'],
dict) or info['playlist']
playlist_entries = [
self.url_result('http://www.ted.com/talks/' + talk['slug'], self.ie_key())
for talk in try_get(
info, lambda x: x['__INITIAL_DATA__']['talks'],
dict) or info['talks']
]
return self.playlist_result(
playlist_entries,
playlist_id=compat_str(playlist_info['id']),
playlist_title=playlist_info['title'])
def _talk_info(self, url, video_name):
webpage = self._download_webpage(url, video_name)
info = self._extract_info(webpage)
talk_info = try_get(
info, lambda x: x['__INITIAL_DATA__']['talks'][0],
dict) or info['talks'][0]
title = talk_info['title'].strip()
external = talk_info.get('external')
if external:
service = external['service']
self.to_screen('Found video from %s' % service)
ext_url = None
if service.lower() == 'youtube':
ext_url = external.get('code')
return {
'_type': 'url',
'url': ext_url or external['uri'],
}
native_downloads = try_get(
talk_info, lambda x: x['downloads']['nativeDownloads'],
dict) or talk_info['nativeDownloads']
formats = [{
'url': format_url,
'format_id': format_id,
'format': format_id,
} for (format_id, format_url) in native_downloads.items() if format_url is not None]
if formats:
for f in formats:
finfo = self._NATIVE_FORMATS.get(f['format_id'])
if finfo:
f.update(finfo)
player_talk = talk_info['player_talks'][0]
resources_ = player_talk.get('resources') or talk_info.get('resources')
http_url = None
for format_id, resources in resources_.items():
if format_id == 'h264':
for resource in resources:
h264_url = resource.get('file')
if not h264_url:
continue
bitrate = int_or_none(resource.get('bitrate'))
formats.append({
'url': h264_url,
'format_id': '%s-%sk' % (format_id, bitrate),
'tbr': bitrate,
})
if re.search(r'\d+k', h264_url):
http_url = h264_url
elif format_id == 'rtmp':
streamer = talk_info.get('streamer')
if not streamer:
continue
for resource in resources:
formats.append({
'format_id': '%s-%s' % (format_id, resource.get('name')),
'url': streamer,
'play_path': resource['file'],
'ext': 'flv',
'width': int_or_none(resource.get('width')),
'height': int_or_none(resource.get('height')),
'tbr': int_or_none(resource.get('bitrate')),
})
elif format_id == 'hls':
formats.extend(self._extract_m3u8_formats(
resources.get('stream'), video_name, 'mp4', m3u8_id=format_id, fatal=False))
m3u8_formats = list(filter(
lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none',
formats))
if http_url:
for m3u8_format in m3u8_formats:
bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None)
if not bitrate:
continue
f = m3u8_format.copy()
f.update({
'url': re.sub(r'\d+k', bitrate, http_url),
'format_id': m3u8_format['format_id'].replace('hls', 'http'),
'protocol': 'http',
})
formats.append(f)
audio_download = talk_info.get('audioDownload')
if audio_download:
formats.append({
'url': audio_download,
'format_id': 'audio',
'vcodec': 'none',
})
self._sort_formats(formats)
video_id = compat_str(talk_info['id'])
return {
'id': video_id,
'title': title,
'uploader': player_talk.get('speaker') or talk_info.get('speaker'),
'thumbnail': player_talk.get('thumb') or talk_info.get('thumb'),
'description': self._og_search_description(webpage),
'subtitles': self._get_subtitles(video_id, talk_info),
'formats': formats,
'duration': talk_info.get('duration'),
}
def _get_subtitles(self, video_id, talk_info):
sub_lang_list = {}
for language in try_get(
talk_info,
(lambda x: x['downloads']['languages'],
lambda x: x['languages']), list):
lang_code = language.get('languageCode') or language.get('ianaCode')
if not lang_code:
continue
sub_lang_list[lang_code] = [
{
'url': 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/%s' % (video_id, lang_code, ext),
'ext': ext,
}
for ext in ['ted', 'srt']
]
return sub_lang_list
def _watch_info(self, url, name):
webpage = self._download_webpage(url, name)
config_json = self._html_search_regex(
r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>',
webpage, 'config', default=None)
if not config_json:
embed_url = self._search_regex(
r"<iframe[^>]+class='pages-video-embed__video__object'[^>]+src='([^']+)'", webpage, 'embed url')
return self.url_result(self._proto_relative_url(embed_url))
config = json.loads(config_json)['config']
video_url = config['video']['url']
thumbnail = config.get('image', {}).get('url')
title = self._html_search_regex(
r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title')
description = self._html_search_regex(
[
r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>',
r'(?s)<p><strong>About this talk:</strong>\s+(.*?)</p>',
],
webpage, 'description', fatal=False)
return {
'id': name,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'description': description,
}
| gpl-3.0 |
cbrewster/servo | tests/wpt/web-platform-tests/tools/pywebsocket/mod_pywebsocket/util.py | 23 | 14116 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket utilities."""
import array
import errno
# Import hash classes from a module available and recommended for each Python
# version and re-export those symbol. Use sha and md5 module in Python 2.4, and
# hashlib module in Python 2.6.
try:
import hashlib
md5_hash = hashlib.md5
sha1_hash = hashlib.sha1
except ImportError:
import md5
import sha
md5_hash = md5.md5
sha1_hash = sha.sha
from six.moves import StringIO
import logging
import os
import re
import socket
import traceback
import zlib
try:
from mod_pywebsocket import fast_masking
except ImportError:
pass
def get_stack_trace():
"""Get the current stack trace as string.
This is needed to support Python 2.3.
TODO: Remove this when we only support Python 2.4 and above.
Use traceback.format_exc instead.
"""
out = StringIO()
traceback.print_exc(file=out)
return out.getvalue()
def prepend_message_to_exception(message, exc):
"""Prepend message to the exception."""
exc.args = (message + str(exc),)
return
def __translate_interp(interp, cygwin_path):
"""Translate interp program path for Win32 python to run cygwin program
(e.g. perl). Note that it doesn't support path that contains space,
which is typically true for Unix, where #!-script is written.
For Win32 python, cygwin_path is a directory of cygwin binaries.
Args:
interp: interp command line
cygwin_path: directory name of cygwin binary, or None
Returns:
translated interp command line.
"""
if not cygwin_path:
return interp
m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
if m:
cmd = os.path.join(cygwin_path, m.group(1))
return cmd + m.group(2)
return interp
def get_script_interp(script_path, cygwin_path=None):
r"""Get #!-interpreter command line from the script.
It also fixes command path. When Cygwin Python is used, e.g. in WebKit,
it could run "/usr/bin/perl -wT hello.pl".
When Win32 Python is used, e.g. in Chromium, it couldn't. So, fix
"/usr/bin/perl" to "<cygwin_path>\perl.exe".
Args:
script_path: pathname of the script
cygwin_path: directory name of cygwin binary, or None
Returns:
#!-interpreter command line, or None if it is not #!-script.
"""
fp = open(script_path)
line = fp.readline()
fp.close()
m = re.match('^#!(.*)', line)
if m:
return __translate_interp(m.group(1), cygwin_path)
return None
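# Illustrative example (hypothetical paths): for a script whose first line is
# "#!/usr/bin/perl -wT", get_script_interp('hello.pl', r'C:\cygwin\bin')
# returns r'C:\cygwin\bin\perl -wT' on Win32 Python, while passing
# cygwin_path=None returns '/usr/bin/perl -wT' unchanged.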
def wrap_popen3_for_win(cygwin_path):
"""Wrap popen3 to support #!-script on Windows.
Args:
cygwin_path: path for cygwin binary if command path is needed to be
translated. None if no translation required.
"""
__orig_popen3 = os.popen3
def __wrap_popen3(cmd, mode='t', bufsize=-1):
cmdline = cmd.split(' ')
interp = get_script_interp(cmdline[0], cygwin_path)
if interp:
cmd = interp + ' ' + cmd
return __orig_popen3(cmd, mode, bufsize)
os.popen3 = __wrap_popen3
def hexify(s):
return ' '.join(map(lambda x: '%02x' % ord(x), s))
def get_class_logger(o):
"""Return the logging class information."""
return logging.getLogger(
'%s.%s' % (o.__class__.__module__, o.__class__.__name__))
class NoopMasker(object):
"""A NoOp masking object.
This has the same interface as RepeatedXorMasker but just returns
the string passed in without making any change.
"""
def __init__(self):
"""NoOp."""
pass
def mask(self, s):
"""NoOp."""
return s
class RepeatedXorMasker(object):
"""A masking object that applies XOR on the string.
    Applies XOR on the string given to the mask method with the masking bytes
    given to the constructor repeatedly. This object remembers the position
    in the masking bytes where the last mask method call ended and resumes
    from that point on the next mask method call.
"""
def __init__(self, masking_key):
self._masking_key = masking_key
self._masking_key_index = 0
def _mask_using_swig(self, s):
"""Perform the mask via SWIG."""
masked_data = fast_masking.mask(
s, self._masking_key, self._masking_key_index)
self._masking_key_index = (
(self._masking_key_index + len(s)) % len(self._masking_key))
return masked_data
def _mask_using_array(self, s):
"""Perform the mask via python."""
result = array.array('B')
result.fromstring(s)
# Use temporary local variables to eliminate the cost to access
# attributes
masking_key = map(ord, self._masking_key)
masking_key_size = len(masking_key)
masking_key_index = self._masking_key_index
for i in xrange(len(result)):
result[i] ^= masking_key[masking_key_index]
masking_key_index = (masking_key_index + 1) % masking_key_size
self._masking_key_index = masking_key_index
return result.tostring()
if 'fast_masking' in globals():
mask = _mask_using_swig
else:
mask = _mask_using_array
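# Illustrative behaviour (not part of the original module): XOR masking is
# its own inverse, so a fresh masker with the same key restores the input:
#   masked = RepeatedXorMasker('\x37\xfa\x21\x3d').mask('Hello')
#   RepeatedXorMasker('\x37\xfa\x21\x3d').mask(masked) == 'Hello'
# Note that the key index advances on every call, so reusing one instance
# does not undo its own output.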
# By making wbits option negative, we can suppress CMF/FLG (2 octet) and
# ADLER32 (4 octet) fields of zlib so that we can use zlib module just as
# deflate library. DICTID won't be added as far as we don't set dictionary.
# LZ77 window of 32K will be used for both compression and decompression.
# For decompression, we can just use 32K to cover any windows size. For
# compression, we use 32K so receivers must use 32K.
#
# Compression level is Z_DEFAULT_COMPRESSION. We don't have to match level
# to decode.
#
# See zconf.h, deflate.cc, inflate.cc of zlib library, and zlibmodule.c of
# Python. See also RFC1950 (ZLIB 3.3).
class _Deflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
self._compress = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
def compress(self, bytes):
compressed_bytes = self._compress.compress(bytes)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
def compress_and_flush(self, bytes):
compressed_bytes = self._compress.compress(bytes)
compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
def compress_and_finish(self, bytes):
compressed_bytes = self._compress.compress(bytes)
compressed_bytes += self._compress.flush(zlib.Z_FINISH)
self._logger.debug('Compress input %r', bytes)
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
class _Inflater(object):
def __init__(self, window_bits):
self._logger = get_class_logger(self)
self._window_bits = window_bits
self._unconsumed = ''
self.reset()
def decompress(self, size):
if not (size == -1 or size > 0):
raise Exception('size must be -1 or positive')
data = ''
while True:
if size == -1:
data += self._decompress.decompress(self._unconsumed)
# See Python bug http://bugs.python.org/issue12050 to
# understand why the same code cannot be used for updating
# self._unconsumed for here and else block.
self._unconsumed = ''
else:
data += self._decompress.decompress(
self._unconsumed, size - len(data))
self._unconsumed = self._decompress.unconsumed_tail
if self._decompress.unused_data:
# Encountered a last block (i.e. a block with BFINAL = 1) and
# found a new stream (unused_data). We cannot use the same
# zlib.Decompress object for the new stream. Create a new
# Decompress object to decompress the new one.
#
# It's fine to ignore unconsumed_tail if unused_data is not
# empty.
self._unconsumed = self._decompress.unused_data
self.reset()
if size >= 0 and len(data) == size:
# data is filled. Don't call decompress again.
break
else:
# Re-invoke Decompress.decompress to try to decompress all
# available bytes before invoking read which blocks until
# any new byte is available.
continue
else:
# Here, since unused_data is empty, even if unconsumed_tail is
# not empty, bytes of requested length are already in data. We
# don't have to "continue" here.
break
if data:
self._logger.debug('Decompressed %r', data)
return data
def append(self, data):
self._logger.debug('Appended %r', data)
self._unconsumed += data
def reset(self):
self._logger.debug('Reset')
self._decompress = zlib.decompressobj(-self._window_bits)
# Compresses/decompresses given octets using the method introduced in RFC1979.
class _RFC1979Deflater(object):
"""A compressor class that applies DEFLATE to given byte sequence and
flushes using the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits, no_context_takeover):
self._deflater = None
if window_bits is None:
window_bits = zlib.MAX_WBITS
self._window_bits = window_bits
self._no_context_takeover = no_context_takeover
def filter(self, bytes, end=True, bfinal=False):
if self._deflater is None:
self._deflater = _Deflater(self._window_bits)
if bfinal:
result = self._deflater.compress_and_finish(bytes)
# Add a padding block with BFINAL = 0 and BTYPE = 0.
result = result + chr(0)
self._deflater = None
return result
result = self._deflater.compress_and_flush(bytes)
if end:
# Strip last 4 octets which is LEN and NLEN field of a
# non-compressed block added for Z_SYNC_FLUSH.
result = result[:-4]
if self._no_context_takeover and end:
self._deflater = None
return result
class _RFC1979Inflater(object):
"""A decompressor class a la RFC1979.
A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
"""
def __init__(self, window_bits=zlib.MAX_WBITS):
self._inflater = _Inflater(window_bits)
def filter(self, bytes):
# Restore stripped LEN and NLEN field of a non-compressed block added
# for Z_SYNC_FLUSH.
self._inflater.append(bytes + '\x00\x00\xff\xff')
return self._inflater.decompress(-1)
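# Round-trip sketch for the RFC 1979 style framing above (default window
# bits, context takeover enabled; illustrative only):
#   deflater = _RFC1979Deflater(None, False)
#   inflater = _RFC1979Inflater()
#   inflater.filter(deflater.filter('Hello')) == 'Hello'
# The deflater strips the 4-octet empty block left by Z_SYNC_FLUSH and the
# inflater appends '\x00\x00\xff\xff' to restore it before inflating.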
class DeflateSocket(object):
"""A wrapper class for socket object to intercept send and recv to perform
deflate compression and decompression transparently.
"""
# Size of the buffer passed to recv to receive compressed data.
_RECV_SIZE = 4096
def __init__(self, socket):
self._socket = socket
self._logger = get_class_logger(self)
self._deflater = _Deflater(zlib.MAX_WBITS)
self._inflater = _Inflater(zlib.MAX_WBITS)
def recv(self, size):
"""Receives data from the socket specified on the construction up
to the specified size. Once any data is available, returns it even
if it's smaller than the specified size.
"""
# TODO(tyoshino): Allow call with size=0. It should block until any
# decompressed data is available.
if size <= 0:
raise Exception('Non-positive size passed')
while True:
data = self._inflater.decompress(size)
if len(data) != 0:
return data
read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
if not read_data:
return ''
self._inflater.append(read_data)
def sendall(self, bytes):
self.send(bytes)
def send(self, bytes):
self._socket.sendall(self._deflater.compress_and_flush(bytes))
return len(bytes)
# vi:sts=4 sw=4 et
| mpl-2.0 |
sachdevs/rmc | models/rating.py | 8 | 3818 | import json
import logging
import mongoengine as me
import rmc.shared.util as util
class AggregateRating(me.EmbeddedDocument):
rating = me.FloatField(min_value=0.0, max_value=1.0, default=0.0)
count = me.IntField(min_value=0, default=0)
sorting_score_positive = me.FloatField(
min_value=0.0, max_value=1.0, default=0.0)
sorting_score_negative = me.FloatField(
min_value=0.0, max_value=1.0, default=0.0)
def debug_logging(self, func_name):
# TODO(Sandy): Temporary debugging for over 100% average rating bug
if self.rating > 1:
logging.warn(
"%s: update_sorting_score will fail" % (func_name) +
" self.count=%s self.rating=%s" % (self.count, self.rating)
)
@property
def num_approves(self):
"""Returns the number of users who selected "yes" for this rating."""
return int(round(self.rating * self.count))
def update_sorting_score(self):
self.sorting_score_positive = util.get_sorting_score(
self.rating, self.count)
self.sorting_score_negative = util.get_sorting_score(
1 - self.rating, self.count)
def add_rating(self, rating):
self.rating = float(self.num_approves + rating) / (self.count + 1)
self.count += 1
# TODO(Sandy): Temporary debugging
self.debug_logging("add_rating(%s)" % (rating))
self.update_sorting_score()
def remove_rating(self, rating):
if self.count == 0:
logging.warn(
"AggregateRating: called remove_rating with count = 0")
return
if self.count == 1:
self.rating = 0.0
else:
self.rating = float(self.num_approves - rating) / (self.count - 1)
self.count -= 1
# TODO(Sandy): Temporary debugging
self.debug_logging("remove_rating(%s)" % (rating))
self.update_sorting_score()
def add_aggregate_rating(self, ar):
if ar.count == 0:
return
total = ar.rating * ar.count
self.rating = (float(self.num_approves + total) /
(self.count + ar.count))
self.count += ar.count
# TODO(Sandy): Temporary debugging
self.debug_logging("add_aggregate_rating(%s)" % (ar))
self.update_sorting_score()
def to_dict(self):
return {
'rating': self.rating,
'count': self.count,
}
def to_json(self):
return json.dumps(self.to_dict())
def update_aggregate_after_replacement(self, old_value, new_value):
if old_value is None and new_value is None:
# Rating not changed
pass
elif old_value is None:
# New rating, add new_value to the aggregate
self.add_rating(new_value)
elif new_value is None:
# Removed a rating, remove old_value from the aggregate
self.remove_rating(old_value)
elif old_value != new_value:
# Modified a rating, removing old_value and add new_value to the
# aggregate
self.remove_rating(old_value)
self.add_rating(new_value)
@classmethod
def from_json(cls, json_str):
obj = json.loads(json_str)
return cls(**obj)
# TODO(david): Does not make sense to make aggregate rating from one rating
@classmethod
def from_single_rating(cls, value):
return cls(rating=value, count=1)
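# Doctest-style sketch of the aggregate math above (illustrative only):
#   >>> ar = AggregateRating()
#   >>> ar.add_rating(1.0); ar.add_rating(0.0)
#   >>> (ar.rating, ar.count)
#   (0.5, 2)
#   >>> ar.remove_rating(1.0)
#   >>> (ar.rating, ar.count)
#   (0.0, 1)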
def get_overall_rating(ar_ratings):
sum_ratings = sum(r['rating'] * r['count'] for r in ar_ratings)
num_ratings = sum(r['count'] for r in ar_ratings)
return AggregateRating(
count=max(r['count'] for r in ar_ratings) if ar_ratings else 0,
rating=sum_ratings / max(num_ratings, 1),
)
| mit |
stevenmizuno/QGIS | python/plugins/processing/tests/GdalAlgorithmsTest.py | 9 | 5256 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GdalAlgorithmTests.py
---------------------
Date : January 2016
Copyright : (C) 2016 by Matthias Kuhn
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Matthias Kuhn'
__date__ = 'January 2016'
__copyright__ = '(C) 2016, Matthias Kuhn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = ':%H$'
import AlgorithmsTestBase
from processing.algs.gdal.OgrToPostGis import OgrToPostGis
from processing.algs.gdal.GdalUtils import GdalUtils
from qgis.core import QgsProcessingContext
import nose2
import os
import shutil
import tempfile
from qgis.testing import (
start_app,
unittest
)
testDataPath = os.path.join(os.path.dirname(__file__), 'testdata')
class TestGdalAlgorithms(unittest.TestCase, AlgorithmsTestBase.AlgorithmsTest):
@classmethod
def setUpClass(cls):
start_app()
from processing.core.Processing import Processing
Processing.initialize()
cls.cleanup_paths = []
@classmethod
def tearDownClass(cls):
for path in cls.cleanup_paths:
shutil.rmtree(path)
def test_definition_file(self):
return 'gdal_algorithm_tests.yaml'
def testOgrLayerNameExtraction(self):
outdir = tempfile.mkdtemp()
self.cleanup_paths.append(outdir)
def _copyFile(dst):
shutil.copyfile(os.path.join(testDataPath, 'custom', 'grass7', 'weighted.csv'), dst)
# OGR provider - single layer
_copyFile(os.path.join(outdir, 'a.csv'))
name = GdalUtils.ogrLayerName(outdir)
self.assertEqual(name, 'a')
# OGR provider - multiple layers
_copyFile(os.path.join(outdir, 'b.csv'))
name1 = GdalUtils.ogrLayerName(outdir + '|layerid=0')
name2 = GdalUtils.ogrLayerName(outdir + '|layerid=1')
self.assertEqual(sorted([name1, name2]), ['a', 'b'])
name = GdalUtils.ogrLayerName(outdir + '|layerid=2')
self.assertIsNone(name)
# OGR provider - layername takes precedence
name = GdalUtils.ogrLayerName(outdir + '|layername=f')
self.assertEqual(name, 'f')
name = GdalUtils.ogrLayerName(outdir + '|layerid=0|layername=f')
self.assertEqual(name, 'f')
name = GdalUtils.ogrLayerName(outdir + '|layername=f|layerid=0')
self.assertEqual(name, 'f')
        # SQLite provider
name = GdalUtils.ogrLayerName('dbname=\'/tmp/x.sqlite\' table="t" (geometry) sql=')
self.assertEqual(name, 't')
# PostgreSQL provider
name = GdalUtils.ogrLayerName('port=5493 sslmode=disable key=\'edge_id\' srid=0 type=LineString table="city_data"."edge" (geom) sql=')
self.assertEqual(name, 'city_data.edge')
class TestGdalOgrToPostGis(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start_app()
from processing.core.Processing import Processing
Processing.initialize()
@classmethod
def tearDownClass(cls):
pass
# See https://issues.qgis.org/issues/15706
def test_getConnectionString(self):
obj = OgrToPostGis()
obj.initAlgorithm({})
parameters = {}
context = QgsProcessingContext()
# NOTE: defaults are debatable, see
# https://github.com/qgis/QGIS/pull/3607#issuecomment-253971020
self.assertEqual(obj.getConnectionString(parameters, context),
"host=localhost port=5432 active_schema=public")
parameters['HOST'] = 'remote'
self.assertEqual(obj.getConnectionString(parameters, context),
"host=remote port=5432 active_schema=public")
parameters['HOST'] = ''
self.assertEqual(obj.getConnectionString(parameters, context),
"port=5432 active_schema=public")
parameters['PORT'] = '5555'
self.assertEqual(obj.getConnectionString(parameters, context),
"port=5555 active_schema=public")
parameters['PORT'] = ''
self.assertEqual(obj.getConnectionString(parameters, context),
"active_schema=public")
parameters['USER'] = 'usr'
self.assertEqual(obj.getConnectionString(parameters, context),
"active_schema=public user=usr")
parameters['PASSWORD'] = 'pwd'
self.assertEqual(obj.getConnectionString(parameters, context),
"password=pwd active_schema=public user=usr")
if __name__ == '__main__':
nose2.main()
| gpl-2.0 |
axilleas/ansible | lib/ansible/plugins/callback/mail.py | 114 | 4572 | # -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import smtplib
import json
from ansible.plugins.callback import CallbackBase
def mail(subject='Ansible error mail', sender=None, to=None, cc=None, bcc=None, body=None, smtphost=None):
if sender is None:
sender='<root>'
if to is None:
to='root'
if smtphost is None:
smtphost=os.getenv('SMTPHOST', 'localhost')
if body is None:
body = subject
smtp = smtplib.SMTP(smtphost)
content = 'From: %s\n' % sender
content += 'To: %s\n' % to
if cc:
content += 'Cc: %s\n' % cc
content += 'Subject: %s\n\n' % subject
content += body
addresses = to.split(',')
if cc:
addresses += cc.split(',')
if bcc:
addresses += bcc.split(',')
for address in addresses:
smtp.sendmail(sender, address, content)
smtp.quit()
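# Example of a direct call to the helper above (hypothetical addresses and host):
#   mail(subject='Failed: setup', to='ops@example.com', cc='team@example.com',
#        body='full task output here', smtphost='smtp.example.com')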
class CallbackModule(CallbackBase):
"""
This Ansible callback plugin mails errors to interested parties.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'mail'
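    # Notification callbacks such as this one are normally enabled explicitly,
    # e.g. in ansible.cfg (assumed configuration key for this Ansible generation):
    #   [defaults]
    #   callback_whitelist = mail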
def v2_runner_on_failed(self, res, ignore_errors=False):
host = res._host.get_name()
if ignore_errors:
return
sender = '"Ansible: %s" <root>' % host
attach = res._task.action
if 'invocation' in res._result:
attach = "%s: %s" % (res._result['invocation']['module_name'], json.dumps(res._result['invocation']['module_args']))
subject = 'Failed: %s' % attach
body = 'The following task failed for host ' + host + ':\n\n%s\n\n' % attach
if 'stdout' in res._result.keys() and res._result['stdout']:
subject = res._result['stdout'].strip('\r\n').split('\n')[-1]
body += 'with the following output in standard output:\n\n' + res._result['stdout'] + '\n\n'
if 'stderr' in res._result.keys() and res._result['stderr']:
            subject = res._result['stderr'].strip('\r\n').split('\n')[-1]
body += 'with the following output in standard error:\n\n' + res._result['stderr'] + '\n\n'
if 'msg' in res._result.keys() and res._result['msg']:
subject = res._result['msg'].strip('\r\n').split('\n')[0]
body += 'with the following message:\n\n' + res._result['msg'] + '\n\n'
body += 'A complete dump of the error:\n\n' + self._dump_results(res._result)
mail(sender=sender, subject=subject, body=body)
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
res = result._result
sender = '"Ansible: %s" <root>' % host
if isinstance(res, basestring):
subject = 'Unreachable: %s' % res.strip('\r\n').split('\n')[-1]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + res
else:
subject = 'Unreachable: %s' % res['msg'].strip('\r\n').split('\n')[0]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + \
res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res)
mail(sender=sender, subject=subject, body=body)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
res = result._result
sender = '"Ansible: %s" <root>' % host
if isinstance(res, basestring):
subject = 'Async failure: %s' % res.strip('\r\n').split('\n')[-1]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + res
else:
subject = 'Async failure: %s' % res['msg'].strip('\r\n').split('\n')[0]
body = 'An error occurred for host ' + host + ' with the following message:\n\n' + \
res['msg'] + '\n\nA complete dump of the error:\n\n' + str(res)
mail(sender=sender, subject=subject, body=body)
| gpl-3.0 |
racitup/django-currencies | example/settings.py | 2 | 2709 | """
Django settings for example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'YOUR_SECRET_KEY'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.tz",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"currencies.context_processors.currencies",
)
# Application definition
PROJECT_APPS = [
'currencies',
]
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.contenttypes',
] + PROJECT_APPS
import django
if django.VERSION < (1, 7):
INSTALLED_APPS += [
'south',
]
MIDDLEWARE_CLASSES = (
# 'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
)
ROOT_URLCONF = 'example.urls'
SITE_ID = 1
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
OPENEXCHANGERATES_APP_ID = "38aceb88e3154a649cf9b0f6e4214598"
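# Assumed to be consumed by django-currencies when importing exchange rates from
# openexchangerates.org; see the django-currencies documentation for the exact
# management command.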
| bsd-3-clause |