repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kant/inasafe | safe/metadata/test/test_hazard_metadata.py | 8 | 1391 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Exception Classes.**
Custom exception classes for the IS application.
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '12/10/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from unittest import TestCase
from safe.common.utilities import unique_filename
from safe.metadata import HazardLayerMetadata
class TestHazardMetadata(TestCase):
def test_standard_properties(self):
metadata = HazardLayerMetadata(unique_filename())
with self.assertRaises(KeyError):
metadata.get_property('non_existing_key')
# from BaseMetadata
metadata.get_property('email')
# from HazardLayerMetadata
metadata.get_property('hazard')
metadata.get_property('hazard_category')
metadata.get_property('continuous_hazard_unit')
metadata.get_property('vector_hazard_classification')
metadata.get_property('raster_hazard_classification')
| gpl-3.0 |
Moriadry/tensorflow | tensorflow/contrib/metrics/python/metrics/classification_test.py | 125 | 4623 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metrics.classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.metrics.python.metrics import classification
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ClassificationTest(test.TestCase):
def testAccuracy1D(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int32, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0]})
self.assertEqual(result, 0.5)
def testAccuracy1DBool(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.bool, shape=[None])
labels = array_ops.placeholder(dtypes.bool, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0]})
self.assertEqual(result, 0.5)
def testAccuracy1DInt64(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int64, shape=[None])
labels = array_ops.placeholder(dtypes.int64, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(acc,
feed_dict={pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0]})
self.assertEqual(result, 0.5)
def testAccuracy1DString(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.string, shape=[None])
labels = array_ops.placeholder(dtypes.string, shape=[None])
acc = classification.accuracy(pred, labels)
result = session.run(
acc,
feed_dict={pred: ['a', 'b', 'a', 'c'],
labels: ['a', 'c', 'b', 'c']})
self.assertEqual(result, 0.5)
def testAccuracyDtypeMismatch(self):
with self.assertRaises(ValueError):
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int64, shape=[None])
classification.accuracy(pred, labels)
def testAccuracyFloatLabels(self):
with self.assertRaises(ValueError):
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.float32, shape=[None])
classification.accuracy(pred, labels)
def testAccuracy1DWeighted(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int32, shape=[None])
weights = array_ops.placeholder(dtypes.float32, shape=[None])
acc = classification.accuracy(pred, labels, weights)
result = session.run(acc,
feed_dict={
pred: [1, 0, 1, 1],
labels: [1, 1, 0, 1],
weights: [3.0, 1.0, 2.0, 0.0]
})
self.assertEqual(result, 0.5)
def testAccuracy1DWeightedBroadcast(self):
with self.test_session() as session:
pred = array_ops.placeholder(dtypes.int32, shape=[None])
labels = array_ops.placeholder(dtypes.int32, shape=[None])
weights = array_ops.placeholder(dtypes.float32, shape=[])
acc = classification.accuracy(pred, labels, weights)
result = session.run(acc,
feed_dict={
pred: [1, 0, 1, 0],
labels: [1, 1, 0, 0],
weights: 3.0,
})
self.assertEqual(result, 0.5)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Distrotech/clamav | contrib/phishing/regex_opt.py | 14 | 1170 | #!/usr/bin/env python
def strlen(a,b):
if len(a)<len(b):
return -1;
elif len(a)>len(b):
return 1;
else:
return 0;
def getcommon_prefix(a,b):
if a==b:
return b;
if a[:-1]==b[:-1]:
return a[:-1];
else:
return ""
fil = file("iana_tld.h")
left = fil.read().split("(")
out=[]
for i in range(1,len(left)):
right = left[i].split(")")
regex_split = right[0].split("|")
regex_split.sort()
regex_split.sort(strlen)
prefix=''
prefixlen=0;
c_map=''
list=[]
for val in regex_split:
if val[:prefixlen] == prefix:
if len(val) == (prefixlen+1):
c_map = c_map+val[prefixlen]
else:
if len(c_map)>1:
c_map = "["+c_map+"]"
if len(prefix+c_map)>0:
list.append(prefix+c_map)
prefix = val[:-1]
prefixlen=len(prefix)
c_map=val[prefixlen]
else:
if len(c_map)>1:
c_map = "["+c_map+"]"
list.append(prefix+c_map)
prefix = getcommon_prefix(prefix,val)
if len(prefix)==0:
prefix=val[:-1]
prefixlen=len(prefix)
c_map=val[prefixlen]
if i==1:
left0=left[0]
else:
left0=""
out.append(left0)
out.append("(")
out.append("|".join(list))
out.append(")")
out.append(right[1])
print "".join(out)
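# Illustrative sketch (not part of the original script) of what the prefix
# merging above aims to do; the entries are hypothetical:
#
#   alternation entries ac|ad|ae share the prefix "a" and differ only in their
#   final character, so they are collapsed into the character class a[cde] in
#   the rewritten output printed above.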
| gpl-2.0 |
shakamunyi/tensorflow | tensorflow/python/ops/tensor_array_grad.py | 71 | 9083 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
# TODO(b/31222613): These ops may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable("TensorArray")
ops.NotDifferentiable("TensorArrayGrad")
ops.NotDifferentiable("TensorArraySize")
ops.NotDifferentiable("TensorArrayClose")
ops.NotDifferentiable("TensorArrayV2")
ops.NotDifferentiable("TensorArrayGradV2")
ops.NotDifferentiable("TensorArraySizeV2")
ops.NotDifferentiable("TensorArrayCloseV2")
ops.NotDifferentiable("TensorArrayV3")
ops.NotDifferentiable("TensorArrayGradV3")
ops.NotDifferentiable("TensorArraySizeV3")
ops.NotDifferentiable("TensorArrayCloseV3")
def _GetGradSource(op_or_tensor):
"""Identify which call to tf.gradients created this gradient op or tensor.
TensorArray gradient calls use an accumulator TensorArray object. If
multiple gradients are calculated and run in the same session, the multiple
gradient nodes may accidentally flow through the same accumulator TensorArray.
This double counting breaks the TensorArray gradient flow.
The solution is to identify which gradient call this particular
TensorArray*Grad is being called in, by looking at the input gradient
tensor's name, and create or lookup an accumulator gradient TensorArray
associated with this specific call. This solves any confusion and ensures
different gradients from the same forward graph get their own accumulators.
This function creates the unique label associated with the tf.gradients call
that is used to create the gradient TensorArray.
Args:
op_or_tensor: `Tensor` or `Operation` which is an input to a
TensorArray*Grad call.
Returns:
A python string, the unique label associated with this particular
gradients calculation.
Raises:
ValueError: If not called within a gradients calculation.
"""
name_tokens = op_or_tensor.name.split("/")
grad_pos = [i for i, x in enumerate(name_tokens) if x.startswith("gradients")]
if not grad_pos:
raise ValueError(
"Expected op/tensor name to start with gradients (excluding scope)"
", got: %s" % op_or_tensor.name)
return "/".join(name_tokens[:grad_pos[-1] + 1])
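# Illustrative trace (not part of the original module) of the name parsing
# above; the tensor name is hypothetical:
#
#   name_tokens = "gradients_1/while/TensorArrayReadV3_grad/acc".split("/")
#   # -> ["gradients_1", "while", "TensorArrayReadV3_grad", "acc"]
#   # Only index 0 starts with "gradients", so grad_pos == [0] and the function
#   # returns "gradients_1" - the label unique to that tf.gradients() call.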
@ops.RegisterGradient("TensorArrayRead")
@ops.RegisterGradient("TensorArrayReadV2")
@ops.RegisterGradient("TensorArrayReadV3")
def _TensorArrayReadGrad(op, grad):
"""Gradient for TensorArrayRead.
Args:
op: Forward TensorArrayRead op.
grad: Gradient `Tensor` to TensorArrayRead.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
index = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
w_g = g.write(index, grad)
return [None, None, w_g.flow]
@ops.RegisterGradient("TensorArrayWrite")
@ops.RegisterGradient("TensorArrayWriteV2")
@ops.RegisterGradient("TensorArrayWriteV3")
def _TensorArrayWriteGrad(op, flow):
"""Gradient for TensorArrayWrite.
Args:
op: Forward TensorArrayWrite op.
flow: Gradient `Tensor` flow to TensorArrayWrite.
Returns:
A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
"""
# handle is the output store_handle of TensorArrayReadGrad or
# the handle output of TensorArrayWriteGrad. We must use this one.
handle = op.inputs[0]
index = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.read(index)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayGather")
@ops.RegisterGradient("TensorArrayGatherV2")
@ops.RegisterGradient("TensorArrayGatherV3")
def _TensorArrayGatherGrad(op, grad):
"""Gradient for TensorArrayGather.
Args:
op: Forward TensorArrayGather op.
grad: Gradient `Tensor` to TensorArrayGather.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
indices = op.inputs[1]
flow = op.inputs[2]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.scatter(indices, grad)
return [None, None, u_g.flow]
@ops.RegisterGradient("TensorArrayScatter")
@ops.RegisterGradient("TensorArrayScatterV2")
@ops.RegisterGradient("TensorArrayScatterV3")
def _TensorArrayScatterGrad(op, flow):
"""Gradient for TensorArrayScatter.
Args:
op: Forward TensorArrayScatter op.
flow: Gradient `Tensor` flow to TensorArrayScatter.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
indices = op.inputs[1]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.gather(indices)
return [None, None, grad, flow]
@ops.RegisterGradient("TensorArrayConcat")
@ops.RegisterGradient("TensorArrayConcatV2")
@ops.RegisterGradient("TensorArrayConcatV3")
def _TensorArrayConcatGrad(op, grad, unused_lengths_grad):
"""Gradient for TensorArrayConcat.
Args:
op: Forward TensorArrayConcat op.
grad: Gradient `Tensor` to TensorArrayConcat.
Returns:
A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
# the case of dynamic sized TensorArrays. When creating the gradient
# TensorArray, the final size of the forward array must be known.
# For this we need to wait until it has been created by depending on
# the input flow of the original op.
handle = op.inputs[0]
flow = op.inputs[1]
lengths = op.outputs[1]
dtype = op.get_attr("dtype")
grad_source = _GetGradSource(grad)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
u_g = g.split(grad, lengths=lengths)
# handle, flow_in
return [None, u_g.flow]
@ops.RegisterGradient("TensorArraySplit")
@ops.RegisterGradient("TensorArraySplitV2")
@ops.RegisterGradient("TensorArraySplitV3")
def _TensorArraySplitGrad(op, flow):
"""Gradient for TensorArraySplit.
Args:
op: Forward TensorArraySplit op.
flow: Gradient `Tensor` flow to TensorArraySplit.
Returns:
A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
dtype = op.get_attr("T")
grad_source = _GetGradSource(flow)
g = (tensor_array_ops.TensorArray(dtype=dtype, handle=handle, flow=flow,
colocate_with_first_write_call=False)
.grad(source=grad_source, flow=flow))
grad = g.concat()
# handle, value, lengths, flow_in
return [None, grad, None, flow]
| apache-2.0 |
latenitefilms/hammerspoon | scripts/docs/bin/build_docs.py | 2 | 29809 | #!/usr/bin/env -S -P/usr/bin:${PATH} python3
# -*- coding: utf-8 -*-
"""Hammerspoon API Documentation Builder"""
import argparse
import json
import os
import pprint
import sqlite3
import sys
import re
DEBUG = False
FAIL_ON_WARN = True
HAS_WARNED = False
LINT_MODE = False
LINTS = []
CHUNK_FILE = 0
CHUNK_LINE = 1
CHUNK_SIGN = 2
CHUNK_TYPE = 3
CHUNK_DESC = 4
TYPE_NAMES = ["Deprecated", "Command", "Constant", "Variable", "Function",
"Constructor", "Field", "Method"]
SECTION_NAMES = ["Parameters", "Returns", "Notes", "Examples"]
TYPE_DESC = {
"Constant": "Useful values which cannot be changed",
"Variable": "Configurable values",
"Function": "API calls offered directly by the extension",
"Method": "API calls which can only be made on an object returned by a constructor",
"Constructor": "API calls which return an object, typically one that offers API methods",
"Command": "External shell commands",
"Field": "Variables which can only be accessed from an object returned by a constructor",
"Deprecated": "API features which will be removed in a future release"
}
LINKS = [
{"name": "Website", "url": "https://www.hammerspoon.org/"},
{"name": "GitHub page",
"url": "https://github.com/Hammerspoon/hammerspoon"},
{"name": "Getting Started Guide",
"url": "https://www.hammerspoon.org/go/"},
{"name": "Spoon Plugin Documentation",
"url": "https://github.com/Hammerspoon/hammerspoon/blob/master/SPOONS.md"},
{"name": "Official Spoon repository",
"url": "https://www.hammerspoon.org/Spoons"},
{"name": "IRC channel",
"url": "irc://irc.libera.chat/#hammerspoon"},
{"name": "Mailing list",
"url": "https://groups.google.com/forum/#!forum/hammerspoon/"},
{"name": "LuaSkin API docs",
"url": "https://www.hammerspoon.org/docs/LuaSkin/"}
]
ARGUMENTS = None
def dbg(msg):
"""Print a debug message"""
if DEBUG:
print("DEBUG: %s" % msg)
def warn(msg):
"""Print a warning message"""
global HAS_WARNED
print("WARN: %s" % msg)
HAS_WARNED = True
def err(msg):
"""Print an error message"""
print("ERROR: %s" % msg)
sys.exit(1)
def find_code_files(path):
"""Find all of the code files under a path"""
code_files = []
for dirpath, _, files in os.walk(path):
dbg("Entering: %s" % dirpath)
for filename in files:
if filename.endswith(".m") or filename.endswith(".lua"):
dbg(" Found file: %s/%s" % (dirpath, filename))
code_files.append("%s/%s" % (dirpath, filename))
return code_files
def extract_docstrings(filename):
"""Find all of the docstrings in a file"""
docstrings = []
is_in_chunk = False
chunk = None
i = 0
with open(filename, "r") as filedata:
for raw_line in filedata.readlines():
i += 1
line = raw_line.strip('\n')
if line.startswith("----") or line.startswith("////"):
dbg("Skipping %s:%d - too many comment chars" % (filename, i))
continue
if line.startswith("---") or line.startswith("///"):
# We're in a chunk of docstrings
if not is_in_chunk:
# This is a new chunk
is_in_chunk = True
chunk = []
# Store the file and line number
chunk.append(filename)
chunk.append("%d" % i)
# Append the line to the current chunk
line = line.strip("/-")
if len(line) > 0 and line[0] == ' ':
line = line[1:]
chunk.append(line)
else:
# We hit a line that isn't a docstring. If we were previously
# processing docstrings, we just exited a chunk of docs, so
# store it and reset for the next chunk.
if is_in_chunk and chunk:
docstrings.append(chunk)
is_in_chunk = False
chunk = None
return docstrings
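# Illustrative shape (not part of the original script) of one returned chunk,
# following the CHUNK_* indices defined above; file name and content are
# hypothetical:
#
#   ["extensions/alert/alert.lua",      # CHUNK_FILE
#    "42",                              # CHUNK_LINE
#    "hs.alert.show(str[, seconds])",   # CHUNK_SIGN
#    "Function",                        # CHUNK_TYPE
#    "Shows a message on the screen",   # CHUNK_DESC
#    "",
#    "Parameters:",
#    " * str - the text to display",
#    ...]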
def find_module_for_item(modules, item):
"""Find the matching module for a given item"""
dbg("find_module_for_item: Searching for: %s" % item)
module = None
# We need a shortcut here for root level items
if not ARGUMENTS.standalone and item.count('.') == 1:
dbg("find_module_for_item: Using root-level shortcut")
module = "hs"
# Methods are very easy to shortcut
if item.count(':') == 1:
dbg("find_module_for_item: Using method shortcut")
module = item.split(':')[0]
if not module:
matches = []
for mod in modules:
if item.startswith(mod):
matches.append(mod)
matches.sort()
dbg("find_module_for_item: Found options: %s" % matches)
try:
module = matches[-1]
except IndexError:
err("Unable to find module for: %s" % item)
dbg("find_module_for_item: Found: %s" % module)
return module
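# For example (module and item names hypothetical): for the item
# "hs.window.filter.new" with known modules ["hs.window", "hs.window.filter"],
# both modules match as prefixes and the lexicographically last (and longest)
# match, "hs.window.filter", is returned.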
def find_itemname_from_signature(signature):
"""Find the name of an item, from a full signature"""
return ''.join(re.split(r"[\(\[\s]", signature)[0])
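# For example (signature hypothetical): re.split(r"[\(\[\s]", "hs.alert.show(str)")
# returns ["hs.alert.show", "str)"], so the item name is everything before the
# first '(', '[' or whitespace character.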
def remove_method_from_itemname(itemname):
"""Return an itemname without any method name in it"""
return itemname.split(':')[0]
def find_basename_from_itemname(itemname):
"""Find the base name of an item, from its full name"""
# (where "base name" means the function/method/variable/etc name
splitchar = '.'
if ':' in itemname:
splitchar = ':'
return itemname.split(splitchar)[-1].split(' ')[0]
def get_section_from_chunk(chunk, sectionname):
"""Extract a named section of a chunk"""
section = []
in_section = False
for line in chunk:
if line == sectionname:
in_section = True
continue
if in_section:
if line == "":
# We've reached the end of the section
break
else:
section.append(line)
return section
def strip_sections_from_chunk(chunk):
"""Remove the Parameters/Returns/Notes/Examples sections from a chunk"""
stripped_chunk = []
in_section = False
for line in chunk:
if line[:-1] in SECTION_NAMES:
# We hit a section
in_section = True
continue
elif line == "":
# We hit the end of a section
in_section = False
continue
else:
if not in_section:
stripped_chunk.append(line)
return stripped_chunk
def process_docstrings(docstrings):
"""Process the docstrings into a proper structure"""
docs = {}
# First we'll find all of the modules and prepare the docs structure
for chunk in docstrings:
if chunk[2].startswith("==="):
# This is a module definition
modulename = chunk[CHUNK_SIGN].strip("= ")
dbg("process_docstrings: Module: %s at %s:%s" % (
modulename,
chunk[CHUNK_FILE],
chunk[CHUNK_LINE]))
docs[modulename] = {}
docs[modulename]["header"] = chunk
docs[modulename]["items"] = {}
# Now we'll get all of the item definitions
for chunk in docstrings:
if not chunk[2].startswith("==="):
# This is an item definition
itemname = find_itemname_from_signature(chunk[CHUNK_SIGN])
dbg("process_docstrings: Found item: %s at %s:%s" % (
itemname,
chunk[CHUNK_FILE],
chunk[CHUNK_LINE]))
modulename = find_module_for_item(list(docs.keys()), itemname)
dbg("process_docstrings: Assigning item to module: %s" %
modulename)
docs[modulename]["items"][itemname] = chunk
return docs
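# Illustrative shape (not part of the original script) of the structure this
# function returns; module and item names are hypothetical:
#
#   {"hs.alert": {"header": [<module chunk>],
#                 "items": {"hs.alert.show": [<item chunk>],
#                           "hs.alert.closeAll": [<item chunk>]}}}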
def process_module(modulename, raw_module):
"""Process the docstrings for a module"""
dbg("Processing module: %s" % modulename)
dbg("Header: %s" % raw_module["header"][CHUNK_DESC])
module = {}
module["name"] = modulename
module["type"] = "Module"
module["desc"] = raw_module["header"][CHUNK_DESC]
module["doc"] = '\n'.join(raw_module["header"][CHUNK_DESC:])
module["stripped_doc"] = '\n'.join(raw_module["header"][CHUNK_DESC + 1:])
module["submodules"] = []
module["items"] = [] # Deprecated
module["Function"] = []
module["Method"] = []
module["Constructor"] = []
module["Constant"] = []
module["Variable"] = []
module["Command"] = []
module["Field"] = []
# NOTE: I don't like having the deprecated type, I think we should revisit
# this later and find another way to annotate deprecations
module["Deprecated"] = []
for itemname in raw_module["items"]:
dbg(" Processing item: %s" % itemname)
chunk = raw_module["items"][itemname]
if chunk[CHUNK_TYPE] not in TYPE_NAMES:
err("UNKNOWN TYPE: %s (%s)" % (chunk[CHUNK_TYPE],
pprint.pformat(chunk)))
basename = find_basename_from_itemname(itemname)
item = {}
item["name"] = basename
item["signature"] = chunk[CHUNK_SIGN]
item["def"] = chunk[CHUNK_SIGN] # Deprecated
item["type"] = chunk[CHUNK_TYPE]
item["desc"] = chunk[CHUNK_DESC]
item["doc"] = '\n'.join(chunk[CHUNK_DESC:])
item["file"] = chunk[CHUNK_FILE]
item["lineno"] = chunk[CHUNK_LINE]
for section in ["Parameters", "Returns", "Notes", "Examples"]:
if section + ':' in chunk:
item[section.lower()] = get_section_from_chunk(chunk,
section + ':')
item["stripped_doc"] = '\n'.join(strip_sections_from_chunk(chunk[CHUNK_DESC + 1:]))
module[item["type"]].append(item)
module["items"].append(item) # Deprecated
dbg(" %s" % pprint.pformat(item).replace('\n', "\n "))
# The rest of this code is only for functions/constructors/methods
if item["type"] not in ["Function", "Constructor", "Method"]:
continue
def is_actual_parameter(some_text):
return some_text.startswith(" * ")
try:
if item['desc'].startswith("Alias for [`"):
item["parameters"] = []
item["returns"] = []
item["notes"] = []
pass
else:
sig_without_return = item["signature"].split("->")[0]
sig_params = re.sub(r".*\((.*)\).*", r"\1", sig_without_return)
sig_param_arr = re.split(r',|\|', sig_params)
sig_arg_count = len(sig_param_arr)
# Check if there are more than a single line of description at the top of the function
params_index = chunk[CHUNK_DESC:].index("Parameters:")
desc_section = [x for x in chunk[CHUNK_DESC:][0:params_index] if x != '']
if len(desc_section) > 1:
message = "Function description should be a single line. Other content may belong in Notes: %s" % sig_without_return
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring function/method/constructor description should not be multiline",
"message": message,
"annotation_level": "failure"
})
# Clean up Parameters
clean_params = []
numlines = len(item["parameters"])
try:
for i in range(0, numlines):
line = item["parameters"][i]
if line.startswith(" * "):
# This is the start of a new parameter, add it to clean_params
clean_params.append(line.rstrip())
elif line.startswith(" * ") or line.startswith(" * "):
if line.startswith(" * "):
# Sub-lists should start with two spaces in GitHub Flavoured Markdown, so add in the missing space in this item
line = " " + line
# This is a sub-parameter of the previous parameter, add it to that string in clean_params
prev_clean_line = clean_params[-1]
prev_clean_line += '\n' + line.rstrip()
clean_params[-1] = prev_clean_line
else:
# This should have been on the line before
prev_clean_line = clean_params[-1]
prev_clean_line += ' ' + line.strip()
clean_params[-1] = prev_clean_line
except:
message = "PARAMETERS FORMAT ISSUE: Unable to parse Parameters for: %s" % sig_without_return
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring function/method/constructor parameter parsing error",
"message": message,
"annotation_level": "failure"
})
item["parameters"] = clean_params
# Check the number of parameters in the signature matches the number in Parameters
parameter_count = len(item["parameters"])
if parameter_count != sig_arg_count:
message = "SIGNATURE/PARAMETER COUNT MISMATCH: '%s' says %d parameters ('%s'), but Parameters section has %d entries:\n%s\n" % (sig_without_return, sig_arg_count, ','.join(sig_param_arr), parameter_count, '\n'.join(item["parameters"]))
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring signature/parameter mismatch",
"message": message,
"annotation_level": "failure"
})
# Check if we have zero items for Returns.
# This is a lint error in Hammerspoon, but in Standalone (ie Spoons) we'll let it slide and assume they meant to have no returns
if "returns" not in item:
item["returns"] = []
if len(item["returns"]) == 0 and not ARGUMENTS.standalone:
message = "RETURN COUNT ERROR: '%s' does not specify a return value" % (sig_without_return)
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring missing return value",
"message": message,
"annotation_level": "failure"
})
# Having validated the Returns, we will now remove any "None" ones
if len(item["returns"]) == 1 and item["returns"][0] == "* None":
item["returns"] = []
# Check if we have zero items for Notes
if "notes" not in item:
item["notes"] = []
# Check if we have zero items for Examples
if "examples" not in item:
item["examples"] = []
except:
message = "Unable to parse parameters for %s\n%s\n" % (item["signature"], sys.exc_info()[1])
warn(message)
LINTS.append({
"file": item["file"],
"line": int(item["lineno"]),
"title": "Docstring Parameters parse failure",
"message": message,
"annotation_level": "failure"
})
if FAIL_ON_WARN:
sys.exit(1)
return module
def strip_paragraph(text):
"""Strip <p> from the start of a string, and </p>\n from the end"""
text = text.replace("<p>", "")
text = text.replace("</p>\n", "")
return text
def process_markdown(data):
"""Pre-render GitHub-flavoured Markdown, and syntax-highlight code"""
import mistune
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
class HighlightRenderer(mistune.Renderer):
def block_code(self, code, lang):
if not lang:
return '\n<pre><code>%s</code></pre>\n' % \
mistune.escape(code)
lexer = get_lexer_by_name(lang, stripall=True)
formatter = html.HtmlFormatter()
return highlight(code, lexer, formatter)
md = mistune.Markdown(renderer=HighlightRenderer())
for i in range(0, len(data)):
module = data[i]
module["desc_gfm"] = md(module["desc"])
module["doc_gfm"] = md(module["doc"])
for item_type in TYPE_NAMES:
items = module[item_type]
for j in range(0, len(items)):
item = items[j]
dbg("Preparing template data for: %s" % item["def"])
item["def_gfm"] = strip_paragraph(md(item["def"]))
item["doc_gfm"] = md(item["doc"])
if item_type in ["Function", "Constructor", "Method"]:
item["parameters_gfm"] = md('\n'.join(item["parameters"]))
item["returns_gfm"] = md('\n'.join(item["returns"]))
item["notes_gfm"] = md('\n'.join(item["notes"]))
items[j] = item
# Now do the same for the deprecated 'items' list
for j in range(0, len(module["items"])):
item = module["items"][j]
item["def_gfm"] = strip_paragraph(md(item["def"]))
item["doc_gfm"] = md(item["doc"])
module["items"][j] = item
data[i] = module
return data
def do_processing(directories):
"""Run all processing steps for one or more directories"""
raw_docstrings = []
codefiles = []
processed_docstrings = []
module_tree = {}
for directory in directories:
codefiles += find_code_files(directory)
if len(codefiles) == 0:
err("No .m/.lua files found")
for filename in codefiles:
raw_docstrings += extract_docstrings(filename)
if len(raw_docstrings) == 0:
err("No docstrings found")
docs = process_docstrings(raw_docstrings)
if len(docs) == 0:
err("No modules found")
for module in docs:
dbg("Processing: %s" % module)
module_docs = process_module(module, docs[module])
module_docs["items"].sort(key=lambda item: item["name"].lower())
for item_type in TYPE_NAMES:
module_docs[item_type].sort(key=lambda item: item["name"].lower())
processed_docstrings.append(module_docs)
# Add this module to our module tree
module_parts = module.split('.')
cursor = module_tree
for part in module_parts:
if part not in cursor:
cursor[part] = {}
cursor = cursor[part]
# Iterate over the modules, consulting the module tree, to find their
# submodules
# (Note that this is done as a separate step after the above loop, to
# ensure that we know about all possible modules by this point)
i = 0
for module in processed_docstrings:
dbg("Finding submodules for: %s" % module["name"])
module_parts = module["name"].split('.')
cursor = module_tree
for part in module_parts:
cursor = cursor[part]
# cursor now points at this module, so now we can check for subs
for sub in list(cursor.keys()):
processed_docstrings[i]["submodules"].append(sub)
processed_docstrings[i]["submodules"].sort()
i += 1
processed_docstrings.sort(key=lambda module: module["name"].lower())
return processed_docstrings
def write_annotations(filepath, data):
"""Write out a JSON file with our linter errors"""
with open(filepath, "wb") as jsonfile:
jsonfile.write(json.dumps(data, indent=2,
separators=(',', ': '),
ensure_ascii=False).encode('utf-8'))
def write_json(filepath, data):
"""Write out a JSON version of the docs"""
with open(filepath, "wb") as jsonfile:
jsonfile.write(json.dumps(data, sort_keys=True, indent=2,
separators=(',', ': '),
ensure_ascii=False).encode('utf-8'))
def write_json_index(filepath, data):
"""Write out a JSON index of the docs"""
index = []
for item in data:
entry = {}
entry["name"] = item["name"]
entry["desc"] = item["desc"]
entry["type"] = item["type"]
index.append(entry)
for subtype in TYPE_NAMES:
for subitem in item[subtype]:
entry = {}
entry["name"] = subitem["name"]
entry["module"] = item["name"]
entry["desc"] = subitem["desc"]
entry["type"] = subitem["type"]
index.append(entry)
with open(filepath, "wb") as jsonfile:
jsonfile.write(json.dumps(index, sort_keys=True, indent=2,
separators=(',', ': '),
ensure_ascii=False).encode('utf-8'))
def write_sql(filepath, data):
"""Write out an SQLite DB of docs metadata, for Dash"""
db = sqlite3.connect(filepath)
cur = db.cursor()
try:
cur.execute("DROP TABLE searchIndex;")
except sqlite3.OperationalError:
# This table won't have existed in a blank database
pass
cur.execute("CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, "
"type TEXT, path TEXT);")
cur.execute("CREATE UNIQUE INDEX anchor ON searchIndex (name, type, "
"path);")
for module in data:
cur.execute("INSERT INTO searchIndex VALUES(NULL, '%(modname)s', "
"'Module', '%(modname)s.html');" %
{"modname": module["name"]})
for item in module["items"]:
try:
cur.execute("INSERT INTO searchIndex VALUES(NULL, "
"'%(modname)s.%(itemname)s', "
"'%(itemtype)s', '%(modname)s.html#%(itemname)s');" %
{"modname": module["name"], "itemname": item["name"],
"itemtype": item["type"]})
except:
err("DB Insert failed on %s:%s(%s)" % (module["name"], item["name"], item["type"]))
db.commit()
cur.execute("VACUUM;")
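# Illustrative rows (not part of the original script) of the resulting Dash
# searchIndex table; module and item names are hypothetical:
#
#   id | name          | type     | path
#   ---+---------------+----------+---------------------
#    1 | hs.alert      | Module   | hs.alert.html
#    2 | hs.alert.show | Function | hs.alert.html#show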
def write_templated_output(output_dir, template_dir, title, data, extension):
"""Write out a templated version of the docs"""
from jinja2 import Environment
jinja = Environment(trim_blocks=True, lstrip_blocks=True)
# Make sure we have a valid output_dir
if not os.path.isdir(output_dir):
try:
os.makedirs(output_dir)
except Exception as error:
err("Output directory is not a directory, "
"and/or can't be created: %s" % error)
# Prepare for writing index.<extensions>
try:
outfile = open(output_dir + "/index." + extension, "wb")
except Exception as error:
err("Unable to create %s: %s" % (output_dir + "/index." + extension,
error))
# Prepare for reading index.j2.<extension>
try:
tmplfile = open(template_dir + "/index.j2." + extension, "r")
except Exception as error:
err("Unable to open index.j2.%s: %s" % (extension, error))
if extension == "html":
# Re-process the doc data to convert Markdown to HTML
data = process_markdown(data)
# Write out the data as a file, for later debugging
write_json(output_dir + "/templated_docs.json", data)
# Render and write index.<extension>
template = jinja.from_string(tmplfile.read())
render = template.render(data=data, links=LINKS, title=title)
outfile.write(render.encode("utf-8"))
outfile.close()
tmplfile.close()
dbg("Wrote index." + extension)
# Render and write module docs
try:
tmplfile = open(template_dir + "/module.j2." + extension, "r")
template = jinja.from_string(tmplfile.read())
except Exception as error:
err("Unable to open module.j2.%s: %s" % (extension, error))
for module in data:
with open("%s/%s.%s" % (output_dir,
module["name"],
extension), "wb") as docfile:
render = template.render(module=module,
type_order=TYPE_NAMES,
type_desc=TYPE_DESC)
docfile.write(render.encode("utf-8"))
dbg("Wrote %s.%s" % (module["name"], extension))
tmplfile.close()
def write_html(output_dir, template_dir, title, data):
"""Write out an HTML version of the docs"""
write_templated_output(output_dir, template_dir, title, data, "html")
def write_markdown(output_dir, template_dir, title, data):
"""Write out a Markdown version of the docs"""
write_templated_output(output_dir, template_dir, title, data, "md")
def main():
"""Main entrypoint"""
global DEBUG
global ARGUMENTS
parser = argparse.ArgumentParser()
commands = parser.add_argument_group("Commands")
commands.add_argument("-v", "--validate", action="store_true",
dest="validate", default=False,
help="Ensure all docstrings are valid")
commands.add_argument("-j", "--json", action="store_true",
dest="json", default=False,
help="Output docs.json")
commands.add_argument("-s", "--sql", action="store_true",
dest="sql", default=False,
help="Output docs.sqlite")
commands.add_argument("-t", "--html", action="store_true",
dest="html", default=False,
help="Output HTML docs")
commands.add_argument("-m", "--markdown", action="store_true",
dest="markdown", default=False,
help="Output Markdown docs")
parser.add_argument("-n", "--standalone",
help="Process a single module only",
action="store_true", default=False,
dest="standalone")
parser.add_argument("-d", "--debug", help="Enable debugging output",
action="store_true", default=False,
dest="debug")
parser.add_argument("-e", "--templates", action="store",
help="Directory of HTML templates",
dest="template_dir", default="scripts/docs/templates")
parser.add_argument("-o", "--output_dir", action="store",
dest="output_dir", default="build/",
help="Directory to write outputs to")
parser.add_argument("-i", "--title", action="store",
dest="title", default="Hammerspoon",
help="Title for the index page")
parser.add_argument("-l", "--lint", action="store_true",
dest="lint_mode", default=False,
help="Run in Lint mode. No docs will be built")
parser.add_argument("DIRS", nargs=argparse.REMAINDER,
help="Directories to search")
arguments, leftovers = parser.parse_known_args()
if arguments.debug:
DEBUG = True
dbg("Arguments: %s" % arguments)
if not arguments.validate and \
not arguments.json and \
not arguments.sql and \
not arguments.html and \
not arguments.markdown and \
not arguments.lint_mode:
parser.print_help()
err("At least one of validate/json/sql/html/markdown is required.")
if len(arguments.DIRS) == 0:
parser.print_help()
err("At least one directory is required. See DIRS")
# Store global copy of our arguments
ARGUMENTS = arguments
if arguments.lint_mode:
global LINT_MODE
global FAIL_ON_WARN
LINT_MODE = True
FAIL_ON_WARN = False
results = do_processing(arguments.DIRS)
if arguments.validate:
# If we got this far, we already processed the docs, and validated them
pass
if arguments.lint_mode:
write_annotations(arguments.output_dir + "/annotations.json", LINTS)
if arguments.json:
write_json(arguments.output_dir + "/docs.json", results)
write_json_index(arguments.output_dir + "/docs_index.json", results)
if arguments.sql:
write_sql(arguments.output_dir + "/docs.sqlite", results)
if arguments.html:
write_html(arguments.output_dir + "/html/",
arguments.template_dir,
arguments.title, results)
if arguments.markdown:
write_markdown(arguments.output_dir + "/markdown/",
arguments.template_dir,
arguments.title, results)
if __name__ == "__main__":
main()
if FAIL_ON_WARN and HAS_WARNED:
sys.exit(1)
| mit |
TeachAtTUM/edx-platform | common/djangoapps/track/backends/logger.py | 26 | 1508 | """Event tracker backend that saves events to a python logger."""
from __future__ import absolute_import
import json
import logging
from django.conf import settings
from track.backends import BaseBackend
from track.utils import DateTimeJSONEncoder
log = logging.getLogger('track.backends.logger')
application_log = logging.getLogger('track.backends.application_log') # pylint: disable=invalid-name
class LoggerBackend(BaseBackend):
"""Event tracker backend that uses a python logger.
Events are logged to the INFO level as JSON strings.
"""
def __init__(self, name, **kwargs):
"""Event tracker backend that uses a python logger.
:Parameters:
- `name`: identifier of the logger, which should have
been configured using the default python mechanisms.
"""
super(LoggerBackend, self).__init__(**kwargs)
self.event_logger = logging.getLogger(name)
def send(self, event):
try:
event_str = json.dumps(event, cls=DateTimeJSONEncoder)
except UnicodeDecodeError:
application_log.exception(
"UnicodeDecodeError Event_data: %r", event
)
raise
# TODO: remove truncation of the serialized event, either at a
# higher level during the emission of the event, or by
# providing warnings when the events exceed a certain size.
event_str = event_str[:settings.TRACK_MAX_EVENT]
self.event_logger.info(event_str)
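# Illustrative usage (not part of the original module); the logger name and
# event payload are hypothetical, and a handler for that logger is assumed to
# be configured elsewhere:
#
#   backend = LoggerBackend(name='tracking')
#   backend.send({'event_type': 'play_video', 'username': 'staff'})
#
# The event is JSON-serialised with DateTimeJSONEncoder, truncated to
# settings.TRACK_MAX_EVENT characters and logged at INFO level.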
| agpl-3.0 |
morlandi/django-permissions | permissions/models.py | 1 | 6596 | # django imports
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
# permissions imports
import permissions.utils
class Permission(models.Model):
"""A permission which can be granted to users/groups and objects.
**Attributes:**
name
The unique name of the permission. This is displayed to users.
codename
The unique codename of the permission. This is used internal to
identify a permission.
content_types
The content types for which the permission is active. This can be
used to display only reasonable permissions for an object.
"""
name = models.CharField(_(u"Name"), max_length=100, unique=True)
codename = models.CharField(_(u"Codename"), max_length=100, unique=True)
content_types = models.ManyToManyField(ContentType, verbose_name=_(u"Content Types"), blank=True, related_name="content_types")
class Meta:
app_label = "permissions"
def __unicode__(self):
return "%s (%s)" % (self.name, self.codename)
class ObjectPermission(models.Model):
"""Grants permission for a role and an content object (optional).
**Attributes:**
role
The role for which the permission is granted.
permission
The permission which is granted.
content
The object for which the permission is granted.
"""
role = models.ForeignKey("Role", verbose_name=_(u"Role"), blank=True, null=True)
permission = models.ForeignKey(Permission, verbose_name=_(u"Permission"))
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"))
content_id = models.PositiveIntegerField(verbose_name=_(u"Content id"))
content = GenericForeignKey(ct_field="content_type", fk_field="content_id")
class Meta:
app_label = "permissions"
def __unicode__(self):
return "%s / %s / %s - %s" % (self.permission.name, self.role, self.content_type, self.content_id)
class ObjectPermissionInheritanceBlock(models.Model):
"""Blocks the inheritance for specific permission and object.
**Attributes:**
permission
The permission for which inheritance is blocked.
content
The object for which the inheritance is blocked.
"""
permission = models.ForeignKey(Permission, verbose_name=_(u"Permission"))
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"))
content_id = models.PositiveIntegerField(verbose_name=_(u"Content id"))
content = GenericForeignKey(ct_field="content_type", fk_field="content_id")
class Meta:
app_label = "permissions"
def __unicode__(self):
return "%s / %s - %s" % (self.permission, self.content_type, self.content_id)
class Role(models.Model):
"""A role gets permissions to do something. Principals (users and groups)
can only get permissions via roles.
**Attributes:**
name
The unique name of the role
"""
name = models.CharField(max_length=100, unique=True)
class Meta:
app_label = "permissions"
ordering = ("name", )
def __unicode__(self):
return self.name
def add_principal(self, principal, content=None):
"""Adds the given principal (user or group) to the Role.
"""
return permissions.utils.add_role(principal, self)
def get_groups(self, content=None):
"""Returns all groups which has this role assigned. If content is given
it returns also the local roles.
"""
if content:
ctype = ContentType.objects.get_for_model(content)
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id__in=(None, content.id),
content_type__in=(None, ctype)).exclude(group=None)
else:
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id=None, content_type=None).exclude(group=None)
return [prr.group for prr in prrs]
def get_users(self, content=None):
"""Returns all users which has this role assigned. If content is given
it returns also the local roles.
"""
if content:
ctype = ContentType.objects.get_for_model(content)
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id__in=(None, content.id),
content_type__in=(None, ctype)).exclude(user=None)
else:
prrs = PrincipalRoleRelation.objects.filter(role=self,
content_id=None, content_type=None).exclude(user=None)
return [prr.user for prr in prrs]
class PrincipalRoleRelation(models.Model):
"""A role given to a principal (user or group). If a content object is
given this is a local role, i.e. the principal has this role only for this
content object. Otherwise it is a global role, i.e. the principal has
this role generally.
user
A user instance. Either a user xor a group needs to be given.
group
A group instance. Either a user xor a group needs to be given.
role
The role which is given to the principal for content.
content
The content object which gets the local role (optional).
"""
user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_(u"User"), blank=True, null=True)
group = models.ForeignKey(Group, verbose_name=_(u"Group"), blank=True, null=True)
role = models.ForeignKey(Role, verbose_name=_(u"Role"))
content_type = models.ForeignKey(ContentType, verbose_name=_(u"Content type"), blank=True, null=True)
content_id = models.PositiveIntegerField(verbose_name=_(u"Content id"), blank=True, null=True)
content = GenericForeignKey(ct_field="content_type", fk_field="content_id")
class Meta:
app_label = "permissions"
def __unicode__(self):
if self.user:
principal = self.user.username
else:
principal = self.group
return "%s - %s" % (principal, self.role)
def get_principal(self):
"""Returns the principal.
"""
return self.user or self.group
def set_principal(self, principal):
"""Sets the principal.
"""
if isinstance(principal, User):
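# NOTE: `User` in the check above is not imported in this module (only Group
# and settings.AUTH_USER_MODEL are), so this isinstance test appears to rely
# on an import that is missing here.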
self.user = principal
else:
self.group = principal
principal = property(get_principal, set_principal)
| bsd-3-clause |
seberg/numpy | benchmarks/benchmarks/bench_reduce.py | 11 | 1723 | from .common import Benchmark, TYPES1, get_squares
import numpy as np
class AddReduce(Benchmark):
def setup(self):
self.squares = get_squares().values()
def time_axis_0(self):
[np.add.reduce(a, axis=0) for a in self.squares]
def time_axis_1(self):
[np.add.reduce(a, axis=1) for a in self.squares]
class AddReduceSeparate(Benchmark):
params = [[0, 1], TYPES1]
param_names = ['axis', 'type']
def setup(self, axis, typename):
self.a = get_squares()[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
class AnyAll(Benchmark):
def setup(self):
# avoid np.zeros's lazy allocation that would
# cause page faults during benchmark
self.zeros = np.full(100000, 0, bool)
self.ones = np.full(100000, 1, bool)
def time_all_fast(self):
self.zeros.all()
def time_all_slow(self):
self.ones.all()
def time_any_fast(self):
self.ones.any()
def time_any_slow(self):
self.zeros.any()
class MinMax(Benchmark):
params = [np.float32, np.float64, np.intp]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.ones(20000, dtype=dtype)
def time_min(self, dtype):
np.min(self.d)
def time_max(self, dtype):
np.max(self.d)
class ArgMax(Benchmark):
params = [np.float32, bool]
param_names = ['dtype']
def setup(self, dtype):
self.d = np.zeros(200000, dtype=dtype)
def time_argmax(self, dtype):
np.argmax(self.d)
class SmallReduction(Benchmark):
def setup(self):
self.d = np.ones(100, dtype=np.float32)
def time_small(self):
np.sum(self.d)
| bsd-3-clause |
hopeall/odoo | openerp/tools/mail.py | 125 | 29474 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2012-TODAY OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
import cgi
import logging
import lxml.html
import lxml.html.clean as clean
import random
import re
import socket
import threading
import time
from email.utils import getaddresses
import openerp
from openerp.loglevels import ustr
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# HTML Sanitizer
#----------------------------------------------------------
tags_to_kill = ["script", "head", "meta", "title", "link", "style", "frame", "iframe", "base", "object", "embed"]
tags_to_remove = ['html', 'body', 'font']
# allow new semantic HTML5 tags
allowed_tags = clean.defs.tags | frozenset('article section header footer hgroup nav aside figure main'.split() + [etree.Comment])
safe_attrs = clean.defs.safe_attrs | frozenset(
['style',
'data-oe-model', 'data-oe-id', 'data-oe-field', 'data-oe-type', 'data-oe-expression', 'data-oe-translate', 'data-oe-nodeid',
'data-snippet-id', 'data-publish', 'data-id', 'data-res_id', 'data-member_id', 'data-view-id'
])
def html_sanitize(src, silent=True, strict=False, strip_style=False):
if not src:
return src
src = ustr(src, errors='replace')
logger = logging.getLogger(__name__ + '.html_sanitize')
# html encode email tags
part = re.compile(r"(<(([^a<>]|a[^<>\s])[^<>]*)@[^<>]+>)", re.IGNORECASE | re.DOTALL)
src = part.sub(lambda m: cgi.escape(m.group(1)), src)
# html encode mako tags <% ... %> to decode them later and keep them alive, otherwise they are stripped by the cleaner
src = src.replace('<%', cgi.escape('<%'))
src = src.replace('%>', cgi.escape('%>'))
kwargs = {
'page_structure': True,
'style': strip_style, # True = remove style tags/attrs
'forms': True, # remove form tags
'remove_unknown_tags': False,
'allow_tags': allowed_tags,
'comments': False,
'processing_instructions': False
}
if etree.LXML_VERSION >= (2, 3, 1):
# kill_tags attribute has been added in version 2.3.1
kwargs.update({
'kill_tags': tags_to_kill,
'remove_tags': tags_to_remove,
})
else:
kwargs['remove_tags'] = tags_to_kill + tags_to_remove
if strict:
if etree.LXML_VERSION >= (3, 1, 0):
# lxml < 3.1.0 does not allow to specify safe_attrs. We keep all attributes in order to keep "style"
kwargs.update({
'safe_attrs_only': True,
'safe_attrs': safe_attrs,
})
else:
kwargs['safe_attrs_only'] = False # keep oe-data attributes + style
kwargs['frames'] = False # do not remove frames (embedded video in CMS blogs)
try:
# some corner cases make the parser crash (such as <SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT> in test_mail)
cleaner = clean.Cleaner(**kwargs)
cleaned = cleaner.clean_html(src)
# MAKO compatibility: $, { and } inside quotes are escaped, preventing correct mako execution
cleaned = cleaned.replace('%24', '$')
cleaned = cleaned.replace('%7B', '{')
cleaned = cleaned.replace('%7D', '}')
cleaned = cleaned.replace('%20', ' ')
cleaned = cleaned.replace('%5B', '[')
cleaned = cleaned.replace('%5D', ']')
cleaned = cleaned.replace('<%', '<%')
cleaned = cleaned.replace('%>', '%>')
except etree.ParserError, e:
if 'empty' in str(e):
return ""
if not silent:
raise
logger.warning('ParserError obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>ParserError when sanitizing</p>'
except Exception:
if not silent:
raise
logger.warning('unknown error obtained when sanitizing %r', src, exc_info=True)
cleaned = '<p>Unknown error when sanitizing</p>'
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if cleaned.startswith('<div>') and cleaned.endswith('</div>'):
cleaned = cleaned[5:-6]
return cleaned
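# Illustrative behaviour (not part of the original module); the input markup
# below is hypothetical:
#
#   html_sanitize('Hello <script>steal()</script><b onclick="x()">world</b>',
#                 strict=True)
#
# The <script> element and its content are dropped entirely (tags_to_kill),
# and in strict mode on lxml >= 3.1.0 only attributes listed in safe_attrs
# survive, so the onclick handler is stripped while the <b> tag and its text
# are kept.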
#----------------------------------------------------------
# HTML Cleaner
#----------------------------------------------------------
def html_email_clean(html, remove=False, shorten=False, max_length=300, expand_options=None,
protect_sections=False):
""" html_email_clean: clean the html by doing the following steps:
- try to strip email quotes, by removing blockquotes or applying some client-
specific heuristics
- try to strip signatures
- shorten the html to a maximum number of characters if requested
Some specific use cases:
- MsOffice: ``div.style = border-top:solid;`` delimits the beginning of
a quote; detected by finding WordSection1 or MsoNormal
- Hotmail: ``hr.stopSpelling`` delimits the beginning of a quote; detect
Hotmail by finding ``SkyDrivePlaceholder``
:param string html: sanitized html; tags like html or head should not
be present in the html string. This method therefore
takes as input html code coming from a sanitized source,
like fields.html.
:param boolean remove: remove the html code that is unwanted; otherwise it
is only flagged and tagged
:param boolean shorten: shorten the html; every excessing content will
be flagged as to remove
:param int max_length: if shortening, maximum number of characters before
shortening
:param dict expand_options: options for the read more link when shortening
the content.The used keys are the following:
- oe_expand_container_tag: class applied to the
container of the whole read more link
- oe_expand_container_class: class applied to the
link container (default: oe_mail_expand)
- oe_expand_container_content: content of the
container (default: ...)
- oe_expand_separator_node: optional separator, like
adding ... <br /><br /> <a ...>read more</a> (default: void)
- oe_expand_a_href: href of the read more link itself
(default: #)
- oe_expand_a_class: class applied to the <a> containing
the link itself (default: oe_mail_expand)
- oe_expand_a_content: content of the <a> (default: read more)
The formatted read more link is the following:
<cont_tag class="oe_expand_container_class">
oe_expand_container_content
if expand_options.get('oe_expand_separator_node'):
<oe_expand_separator_node/>
<a href="oe_expand_a_href" class="oe_expand_a_class">
oe_expand_a_content
</a>
</span>
"""
def _replace_matching_regex(regex, source, replace=''):
""" Replace all matching expressions in source by replace """
if not source:
return source
dest = ''
idx = 0
for item in re.finditer(regex, source):
dest += source[idx:item.start()] + replace
idx = item.end()
dest += source[idx:]
return dest
def _create_node(tag, text, tail=None, attrs={}):
new_node = etree.Element(tag)
new_node.text = text
new_node.tail = tail
for key, val in attrs.iteritems():
new_node.set(key, val)
return new_node
def _insert_new_node(node, index, new_node_tag, new_node_text, new_node_tail=None, new_node_attrs={}):
new_node = _create_node(new_node_tag, new_node_text, new_node_tail, new_node_attrs)
node.insert(index, new_node)
return new_node
def _tag_matching_regex_in_text(regex, node, new_node_tag='span', new_node_attrs={}):
text = node.text or ''
if not re.search(regex, text):
return
cur_node = node
node.text = ''
idx, iteration = 0, 0
for item in re.finditer(regex, text):
if iteration == 0:
cur_node.text = text[idx:item.start()]
else:
_insert_new_node(node, (iteration - 1) * 2 + 1, new_node_tag, text[idx:item.start()])
new_node = _insert_new_node(node, iteration * 2, new_node_tag, text[item.start():item.end()], None, new_node_attrs)
cur_node = new_node
idx = item.end()
iteration += 1
new_node = _insert_new_node(node, -1, new_node_tag, text[idx:] + (cur_node.tail or ''), None, {})
def _truncate_node(node, position, simplify_whitespaces=True):
""" Truncate a node text at a given position. This algorithm will shorten
at the end of the word whose ending character exceeds position.
:param bool simplify_whitespaces: whether to try to count all successive
whitespaces as one character. This
option should not be True when trying
to keep 'pre' consistency.
"""
if node.text is None:
node.text = ''
truncate_idx = -1
if simplify_whitespaces:
cur_char_nbr = 0
word = None
node_words = node.text.strip(' \t\r\n').split()
for word in node_words:
cur_char_nbr += len(word)
if cur_char_nbr >= position:
break
if word:
truncate_idx = node.text.find(word) + len(word)
else:
truncate_idx = position
if truncate_idx == -1 or truncate_idx > len(node.text):
truncate_idx = len(node.text)
# compose new text bits
innertext = node.text[0:truncate_idx]
outertext = node.text[truncate_idx:]
node.text = innertext
# create <span> ... <a href="#">read more</a></span> node
read_more_node = _create_node(
expand_options.get('oe_expand_container_tag', 'span'),
expand_options.get('oe_expand_container_content', ' ... '),
None,
{'class': expand_options.get('oe_expand_container_class', 'oe_mail_expand')}
)
if expand_options.get('oe_expand_separator_node'):
read_more_separator_node = _create_node(
expand_options.get('oe_expand_separator_node'),
'',
None,
{}
)
read_more_node.append(read_more_separator_node)
read_more_link_node = _create_node(
'a',
expand_options.get('oe_expand_a_content', _('read more')),
None,
{
'href': expand_options.get('oe_expand_a_href', '#'),
'class': expand_options.get('oe_expand_a_class', 'oe_mail_expand'),
}
)
read_more_node.append(read_more_link_node)
# create outertext node
overtext_node = _create_node('span', outertext)
# tag node
overtext_node.set('in_overlength', '1')
# add newly created nodes in dom
node.append(read_more_node)
node.append(overtext_node)
if expand_options is None:
expand_options = {}
if not html or not isinstance(html, basestring):
return html
html = ustr(html)
# Pre processing
# ------------------------------------------------------------
# TDE TODO: --- MAIL ORIGINAL ---: '[\-]{4,}([^\-]*)[\-]{4,}'
# html: remove encoding attribute inside tags
doctype = re.compile(r'(<[^>]*\s)(encoding=(["\'][^"\']*?["\']|[^\s\n\r>]+)(\s[^>]*|/)?>)', re.IGNORECASE | re.DOTALL)
html = doctype.sub(r"", html)
    # html: ClEditor seems to love using <div><br /></div> -> replace with <br />
br_div_tags = re.compile(r'(<div>\s*<br\s*\/>\s*<\/div>)', re.IGNORECASE)
html = _replace_matching_regex(br_div_tags, html, '<br />')
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
quote_tags = re.compile(r'(\n(>)+[^\n\r]*)')
signature = re.compile(r'([-]{2,}[\s]?[\r\n]{1,2}[\s\S]+)')
for node in root.iter():
# remove all tails and replace them by a span element, because managing text and tails can be a pain in the ass
if node.tail:
tail_node = _create_node('span', node.tail)
node.tail = None
node.addnext(tail_node)
# form node and tag text-based quotes and signature
_tag_matching_regex_in_text(quote_tags, node, 'span', {'text_quote': '1'})
_tag_matching_regex_in_text(signature, node, 'span', {'text_signature': '1'})
# Processing
# ------------------------------------------------------------
# tree: tag nodes
# signature_begin = False # try dynamic signature recognition
quote_begin = False
overlength = False
overlength_section_id = None
overlength_section_count = 0
cur_char_nbr = 0
for node in root.iter():
# comments do not need processing
# note: bug in node.get(value, default) for HtmlComments, default never returned
if node.tag == etree.Comment:
continue
# do not take into account multiple spaces that are displayed as max 1 space in html
node_text = ' '.join((node.text and node.text.strip(' \t\r\n') or '').split())
# root: try to tag the client used to write the html
if 'WordSection1' in node.get('class', '') or 'MsoNormal' in node.get('class', ''):
root.set('msoffice', '1')
if 'SkyDrivePlaceholder' in node.get('class', '') or 'SkyDrivePlaceholder' in node.get('id', ''):
root.set('hotmail', '1')
# protect sections by tagging section limits and blocks contained inside sections, using an increasing id to re-find them later
if node.tag == 'section':
overlength_section_count += 1
node.set('section_closure', str(overlength_section_count))
if node.getparent() is not None and (node.getparent().get('section_closure') or node.getparent().get('section_inner')):
node.set('section_inner', str(overlength_section_count))
# state of the parsing: flag quotes and tails to remove
if quote_begin:
node.set('in_quote', '1')
node.set('tail_remove', '1')
# state of the parsing: flag when being in over-length content, depending on section content if defined (only when having protect_sections)
if overlength:
if not overlength_section_id or int(node.get('section_inner', overlength_section_count + 1)) > overlength_section_count:
node.set('in_overlength', '1')
node.set('tail_remove', '1')
# find quote in msoffice / hotmail / blockquote / text quote and signatures
if root.get('msoffice') and node.tag == 'div' and 'border-top:solid' in node.get('style', ''):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if root.get('hotmail') and node.tag == 'hr' and ('stopSpelling' in node.get('class', '') or 'stopSpelling' in node.get('id', '')):
quote_begin = True
node.set('in_quote', '1')
node.set('tail_remove', '1')
if node.tag == 'blockquote' or node.get('text_quote') or node.get('text_signature'):
# here no quote_begin because we want to be able to remove some quoted
# text without removing all the remaining context
node.set('in_quote', '1')
if node.getparent() is not None and node.getparent().get('in_quote'):
# inside a block of removed text but not in quote_begin (see above)
node.set('in_quote', '1')
# shorten:
# if protect section:
# 1/ find the first parent not being inside a section
# 2/ add the read more link
# else:
# 1/ truncate the text at the next available space
# 2/ create a 'read more' node, next to current node
# 3/ add the truncated text in a new node, next to 'read more' node
node_text = (node.text or '').strip().strip('\n').strip()
if shorten and not overlength and cur_char_nbr + len(node_text) > max_length:
node_to_truncate = node
while node_to_truncate.getparent() is not None:
if node_to_truncate.get('in_quote'):
node_to_truncate = node_to_truncate.getparent()
elif protect_sections and (node_to_truncate.getparent().get('section_inner') or node_to_truncate.getparent().get('section_closure')):
node_to_truncate = node_to_truncate.getparent()
overlength_section_id = node_to_truncate.get('section_closure')
else:
break
overlength = True
node_to_truncate.set('truncate', '1')
if node_to_truncate == node:
node_to_truncate.set('truncate_position', str(max_length - cur_char_nbr))
else:
node_to_truncate.set('truncate_position', str(len(node.text or '')))
cur_char_nbr += len(node_text)
# Tree modification
# ------------------------------------------------------------
for node in root.iter():
if node.get('truncate'):
_truncate_node(node, int(node.get('truncate_position', '0')), node.tag != 'pre')
# Post processing
# ------------------------------------------------------------
to_remove = []
for node in root.iter():
if node.get('in_quote') or node.get('in_overlength'):
# copy the node tail into parent text
if node.tail and not node.get('tail_remove'):
parent = node.getparent()
parent.tail = node.tail + (parent.tail or '')
to_remove.append(node)
if node.get('tail_remove'):
node.tail = ''
# clean node
for attribute_name in ['in_quote', 'tail_remove', 'in_overlength', 'msoffice', 'hotmail', 'truncate', 'truncate_position']:
node.attrib.pop(attribute_name, None)
for node in to_remove:
if remove:
node.getparent().remove(node)
else:
if not expand_options.get('oe_expand_a_class', 'oe_mail_expand') in node.get('class', ''): # trick: read more link should be displayed even if it's in overlength
node_class = node.get('class', '') + ' oe_mail_cleaned'
node.set('class', node_class)
# html: \n that were tail of elements have been encapsulated into <span> -> back to \n
html = etree.tostring(root, pretty_print=False)
linebreaks = re.compile(r'<span[^>]*>([\s]*[\r\n]+[\s]*)<\/span>', re.IGNORECASE | re.DOTALL)
html = _replace_matching_regex(linebreaks, html, '\n')
return html
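# Minimal usage sketch for the cleaning function above. The public name is assumed to be
# html_email_clean (as in OpenERP's mail tools); the keyword arguments are the ones its
# body actually reads (remove, shorten, max_length, expand_options, protect_sections):
#   cleaned = html_email_clean(message_html, shorten=True, max_length=300,
#                              protect_sections=True)
# Quoted replies and signatures get tagged (and removed when remove=True), and any text
# beyond max_length ends up behind the generated "read more" expander.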
#----------------------------------------------------------
# HTML/Text management
#----------------------------------------------------------
def html2plaintext(html, body_id=None, encoding='utf-8'):
""" From an HTML text, convert the HTML to plain text.
If @param body_id is provided then this is the tag where the
body (not necessarily <body>) starts.
"""
## (c) Fry-IT, www.fry-it.com, 2007
## <[email protected]>
## download here: http://www.peterbe.com/plog/html2plaintext
html = ustr(html)
tree = etree.fromstring(html, parser=etree.HTMLParser())
if body_id is not None:
source = tree.xpath('//*[@id=%s]' % (body_id,))
else:
source = tree.xpath('//body')
if len(source):
tree = source[0]
url_index = []
i = 0
for link in tree.findall('.//a'):
url = link.get('href')
if url:
i += 1
link.tag = 'span'
link.text = '%s [%s]' % (link.text, i)
url_index.append(url)
html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')
html = html.replace('<strong>', '*').replace('</strong>', '*')
html = html.replace('<b>', '*').replace('</b>', '*')
html = html.replace('<h3>', '*').replace('</h3>', '*')
html = html.replace('<h2>', '**').replace('</h2>', '**')
html = html.replace('<h1>', '**').replace('</h1>', '**')
html = html.replace('<em>', '/').replace('</em>', '/')
html = html.replace('<tr>', '\n')
html = html.replace('</p>', '\n')
html = re.sub('<br\s*/?>', '\n', html)
html = re.sub('<.*?>', ' ', html)
html = html.replace(' ' * 2, ' ')
    html = html.replace('&gt;', '>')
    html = html.replace('&lt;', '<')
    html = html.replace('&amp;', '&')
# strip all lines
html = '\n'.join([x.strip() for x in html.splitlines()])
html = html.replace('\n' * 2, '\n')
for i, url in enumerate(url_index):
if i == 0:
html += '\n\n'
html += ustr('[%s] %s\n') % (i + 1, url)
return html
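# Usage sketch for html2plaintext (output shown approximately):
#   html2plaintext('<div><b>Hello</b> <a href="http://example.com">more</a></div>')
#   # -> '*Hello* more [1]\n\n[1] http://example.com\n'
# Bold text and headings become '*' / '**' markers and links are collected as numbered
# references appended at the end.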
def plaintext2html(text, container_tag=False):
""" Convert plaintext into html. Content of the text is escaped to manage
html entities, using cgi.escape().
- all \n,\r are replaced by <br />
- enclose content into <p>
- 2 or more consecutive <br /> are considered as paragraph breaks
:param string container_tag: container of the html; by default the
content is embedded into a <div>
"""
text = cgi.escape(ustr(text))
# 1. replace \n and \r
text = text.replace('\n', '<br/>')
text = text.replace('\r', '<br/>')
# 2-3: form paragraphs
idx = 0
final = '<p>'
br_tags = re.compile(r'(([<]\s*[bB][rR]\s*\/?[>]\s*){2,})')
for item in re.finditer(br_tags, text):
final += text[idx:item.start()] + '</p><p>'
idx = item.end()
final += text[idx:] + '</p>'
# 4. container
if container_tag:
final = '<%s>%s</%s>' % (container_tag, final, container_tag)
return ustr(final)
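# Usage sketch for plaintext2html:
#   plaintext2html('Hello\nworld\n\nBye', container_tag='div')
#   # -> '<div><p>Hello<br/>world</p><p>Bye</p></div>'
# Single newlines become <br/>, runs of two or more <br/> are treated as paragraph breaks.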
def append_content_to_html(html, content, plaintext=True, preserve=False, container_tag=False):
""" Append extra content at the end of an HTML snippet, trying
to locate the end of the HTML document (</body>, </html>, or
        EOF), and converting the provided content to HTML unless ``plaintext``
        is False.
Content conversion can be done in two ways:
- wrapping it into a pre (preserve=True)
- use plaintext2html (preserve=False, using container_tag to wrap the
whole content)
A side-effect of this method is to coerce all HTML tags to
lowercase in ``html``, and strip enclosing <html> or <body> tags in
content if ``plaintext`` is False.
:param str html: html tagsoup (doesn't have to be XHTML)
:param str content: extra content to append
:param bool plaintext: whether content is plaintext and should
be wrapped in a <pre/> tag.
:param bool preserve: if content is plaintext, wrap it into a <pre>
instead of converting it into html
"""
html = ustr(html)
if plaintext and preserve:
content = u'\n<pre>%s</pre>\n' % ustr(content)
elif plaintext:
content = '\n%s\n' % plaintext2html(content, container_tag)
else:
content = re.sub(r'(?i)(</?(?:html|body|head|!\s*DOCTYPE)[^>]*>)', '', content)
content = u'\n%s\n' % ustr(content)
# Force all tags to lowercase
html = re.sub(r'(</?)\W*(\w+)([ >])',
lambda m: '%s%s%s' % (m.group(1), m.group(2).lower(), m.group(3)), html)
insert_location = html.find('</body>')
if insert_location == -1:
insert_location = html.find('</html>')
if insert_location == -1:
return '%s%s' % (html, content)
return '%s%s%s' % (html[:insert_location], content, html[insert_location:])
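# Usage sketch for append_content_to_html:
#   append_content_to_html('<html><body><p>Hi</p></body></html>', 'Bye')
#   # -> '<html><body><p>Hi</p>\n<p>Bye</p>\n</body></html>'
# The plaintext content goes through plaintext2html() and is spliced in just before the
# closing </body> tag (or </html>, or appended at EOF).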
#----------------------------------------------------------
# Emails
#----------------------------------------------------------
# matches any email in a body of text
email_re = re.compile(r"""([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6})""", re.VERBOSE)
# matches a string containing only one email
single_email_re = re.compile(r"""^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,6}$""", re.VERBOSE)
res_re = re.compile(r"\[([0-9]+)\]", re.UNICODE)
command_re = re.compile("^Set-([a-z]+) *: *(.+)$", re.I + re.UNICODE)
# Updated in 7.0 to match the model name as well
# Typical form of references is <timestamp-openerp-record_id-model_name@domain>
# group(1) = the record ID ; group(2) = the model (if any) ; group(3) = the domain
reference_re = re.compile("<.*-open(?:object|erp)-(\\d+)(?:-([\w.]+))?[^>]*@([^>]*)>", re.UNICODE)
def generate_tracking_message_id(res_id):
"""Returns a string that can be used in the Message-ID RFC822 header field
Used to track the replies related to a given object thanks to the "In-Reply-To"
or "References" fields that Mail User Agents will set.
"""
try:
rnd = random.SystemRandom().random()
except NotImplementedError:
rnd = random.random()
rndstr = ("%.15f" % rnd)[2:]
return "<%.15f.%s-openerp-%s@%s>" % (time.time(), rndstr, res_id, socket.gethostname())
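# Example of a generated Message-ID (illustrative values):
#   <1400000000.000000119209290.123456789012345-openerp-42@myhost>
# i.e. time.time() with 15 decimals, a 15-digit random fraction, the record id and the
# local hostname.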
def email_send(email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, openobject_id=False, debug=False, subtype='plain', headers=None,
smtp_server=None, smtp_port=None, ssl=False, smtp_user=None, smtp_password=None, cr=None, uid=None):
"""Low-level function for sending an email (deprecated).
    :deprecated: since OpenERP 6.1, please use ir.mail_server.send_email() instead.
:param email_from: A string used to fill the `From` header, if falsy,
config['email_from'] is used instead. Also used for
the `Reply-To` header if `reply_to` is not provided
:param email_to: a sequence of addresses to send the mail to.
"""
# If not cr, get cr from current thread database
local_cr = None
if not cr:
db_name = getattr(threading.currentThread(), 'dbname', None)
if db_name:
local_cr = cr = openerp.registry(db_name).cursor()
else:
raise Exception("No database cursor found, please pass one explicitly")
# Send Email
try:
mail_server_pool = openerp.registry(cr.dbname)['ir.mail_server']
res = False
# Pack Message into MIME Object
email_msg = mail_server_pool.build_email(email_from, email_to, subject, body, email_cc, email_bcc, reply_to,
attachments, message_id, references, openobject_id, subtype, headers=headers)
res = mail_server_pool.send_email(cr, uid or 1, email_msg, mail_server_id=None,
smtp_server=smtp_server, smtp_port=smtp_port, smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=('ssl' if ssl else None), smtp_debug=debug)
except Exception:
_logger.exception("tools.email_send failed to deliver email")
return False
finally:
if local_cr:
cr.close()
return res
def email_split(text):
""" Return a list of the email addresses found in ``text`` """
if not text:
return []
return [addr[1] for addr in getaddresses([text])
# getaddresses() returns '' when email parsing fails, and
# sometimes returns emails without at least '@'. The '@'
# is strictly required in RFC2822's `addr-spec`.
if addr[1]
if '@' in addr[1]]
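# Usage sketch for email_split:
#   email_split('Raoul <raoul@example.com>, not-an-address, alice@test.example')
#   # -> ['raoul@example.com', 'alice@test.example']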
| agpl-3.0 |
brkrishna/freelance | univs/archives/snc_edu.py | 1 | 3105 | # -- coding: utf-8 --
#-------------------------------------------------------------------------------
# Name: snc_edu
# Purpose: St. Norbert College
#
# Author: Ramakrishna
#
# Dated: 07/Apr/2016
# Copyright: (c) Ramakrishna 2016
# Licence: <your licence>
#-------------------------------------------------------------------------------
import requests, re, os, csv
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from lxml import html
import socks, socket
from collections import OrderedDict
from queue import Queue
from threading import Thread
#socks.setdefaultproxy(proxy_type=socks.PROXY_TYPE_SOCKS5, addr="127.0.0.1", port=9150)
#socket.socket = socks.socksocket
url = 'http://www.snc.edu/cgi-bin/people/search.cgi'
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:45.0) Gecko/20100101 Firefox/45.0'}
#service_args = ['--proxy=127.0.0.1:9150','--proxy-type=socks5',]
def search(term):
try:
server_url = "http://%s:%s/wd/hub" % ('127.0.0.1', '4444')
dc = DesiredCapabilities.HTMLUNIT
d = webdriver.Remote(server_url, dc)
d.get(url)
d.find_element_by_name('str').clear()
d.find_element_by_name('str').send_keys(term.replace("\n", ""))
d.find_element_by_name('sbutton').click()
tree = html.fromstring(d.page_source.encode("utf-8"))
trs = tree.xpath("//table[@style='border-collapse: collapse']//tr")
count = len(trs)
records = []
for i in range(3, count):
rec = "$$$".join(trs[i].xpath("./td[1]//text()[normalize-space()]")).replace("\r\n", "").replace(" ", "").strip()
if 'Student' not in rec:
continue
row = OrderedDict()
try:
row['name'] = rec[:rec.find("Student")].replace("$$$", "").strip()
except:
continue
try:
row['email'] = rec[rec.find("Student$$$")+10:].replace("$$$", "")
except:
pass
records.append(row)
if len(records) > 0:
file_exists = os.path.isfile('snc_edu.csv')
with open('snc_edu.csv', 'a', newline='', encoding='utf-8') as outfile:
fp = csv.DictWriter(outfile, records[0].keys())
if not file_exists:
fp.writeheader()
fp.writerows(records)
with open('snc_terms', 'a') as f:
f.write(term + "\n")
except Exception as e:
print(e.__doc__)
print(e.args)
return None
finally:
if d:
d = None
class Worker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
term = self.queue.get()
search(term)
self.queue.task_done()
def main():
try:
terms = set(open('terms.txt').readlines())
if os.path.isfile('snc_terms'):
finished_terms = set(open('snc_terms').readlines())
terms -= finished_terms
terms = list(terms)
queue = Queue()
for x in range(16):
worker = Worker(queue)
worker.daemon = True
worker.start()
terms_count = len(terms)
for i in range(0, terms_count):
queue.put(terms[i])
queue.join()
except Exception as e:
print(e.__doc__)
print(e.args)
if __name__ == '__main__':
main()
| gpl-2.0 |
Bforartists/scons | scons-local/SCons/Tool/BitKeeper.py | 7 | 2433 | """SCons.Tool.BitKeeper.py
Tool-specific initialization for the BitKeeper source code control
system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/BitKeeper.py 2014/07/05 09:42:21 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
BitKeeper to an Environment."""
def BitKeeperFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'BitKeeper', BitKeeperFactory)
env.BitKeeper = BitKeeperFactory
env['BITKEEPER'] = 'bk'
env['BITKEEPERGET'] = '$BITKEEPER get'
env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
env['BITKEEPERCOM'] = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'
def exists(env):
return env.Detect('bk')
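# Usage sketch (the whole factory is deprecated, see the warning above). In an SConstruct,
# the tool would typically be hooked up through the old SourceCode() API, roughly:
#   env = Environment(tools=['BitKeeper'])
#   env.SourceCode('.', env.BitKeeper())
# Missing source files are then fetched with `bk get $TARGET` via $BITKEEPERCOM.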
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
stanta/darfchain | darfchain_docker/tests/web/test_blocks.py | 3 | 5828 | import pytest
from bigchaindb.models import Transaction
BLOCKS_ENDPOINT = '/api/v1/blocks/'
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_block_endpoint(b, client):
tx = Transaction.create([b.me], [([b.me], 1)])
tx = tx.sign([b.me_private])
block = b.create_block([tx])
b.write_block(block)
res = client.get(BLOCKS_ENDPOINT + block.id)
assert block.to_dict() == res.json
assert res.status_code == 200
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_block_returns_404_if_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '123')
assert res.status_code == 404
res = client.get(BLOCKS_ENDPOINT + '123/')
assert res.status_code == 404
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_blocks_by_txid_endpoint(b, client):
tx = Transaction.create([b.me], [([b.me], 1)])
tx = tx.sign([b.me_private])
tx2 = Transaction.create([b.me], [([b.me], 10)])
tx2 = tx2.sign([b.me_private])
block_invalid = b.create_block([tx])
b.write_block(block_invalid)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as undecided
assert res.status_code == 200
assert block_invalid.id in res.json
assert len(res.json) == 1
# vote the block invalid
vote = b.vote(block_invalid.id, b.get_last_voted_block().id, False)
b.write_vote(vote)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as invalid
assert res.status_code == 200
assert block_invalid.id in res.json
assert len(res.json) == 1
# create a new block containing the same tx (and tx2 to avoid block id collision)
block_valid = b.create_block([tx, tx2])
b.write_block(block_valid)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as undecided
assert res.status_code == 200
assert block_valid.id in res.json
assert len(res.json) == 2
# vote the block valid
vote = b.vote(block_valid.id, block_invalid.id, True)
b.write_vote(vote)
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=' + tx.id)
# test if block is retrieved as valid
assert res.status_code == 200
assert block_valid.id in res.json
assert len(res.json) == 2
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_blocks_by_txid_and_status_endpoint(b, client):
from bigchaindb import Bigchain
tx = Transaction.create([b.me], [([b.me], 1)])
tx = tx.sign([b.me_private])
tx2 = Transaction.create([b.me], [([b.me], 10)])
tx2 = tx2.sign([b.me_private])
block_invalid = b.create_block([tx])
b.write_block(block_invalid)
# create a new block containing the same tx (and tx2 to avoid block id collision)
block_valid = b.create_block([tx, tx2])
b.write_block(block_valid)
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_INVALID))
# test if no blocks are retrieved as invalid
assert res.status_code == 200
assert len(res.json) == 0
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_UNDECIDED))
# test if both blocks are retrieved as undecided
assert res.status_code == 200
assert block_valid.id in res.json
assert block_invalid.id in res.json
assert len(res.json) == 2
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_VALID))
# test if no blocks are retrieved as valid
assert res.status_code == 200
assert len(res.json) == 0
# vote one of the blocks invalid
vote = b.vote(block_invalid.id, b.get_last_voted_block().id, False)
b.write_vote(vote)
# vote the other block valid
vote = b.vote(block_valid.id, block_invalid.id, True)
b.write_vote(vote)
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_INVALID))
# test if the invalid block is retrieved as invalid
assert res.status_code == 200
assert block_invalid.id in res.json
assert len(res.json) == 1
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_UNDECIDED))
# test if no blocks are retrieved as undecided
assert res.status_code == 200
assert len(res.json) == 0
res = client.get('{}?transaction_id={}&status={}'.format(BLOCKS_ENDPOINT, tx.id, Bigchain.BLOCK_VALID))
# test if the valid block is retrieved as valid
assert res.status_code == 200
assert block_valid.id in res.json
assert len(res.json) == 1
@pytest.mark.bdb
def test_get_blocks_by_txid_endpoint_returns_empty_list_not_found(client):
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=')
assert res.status_code == 200
assert len(res.json) == 0
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123')
assert res.status_code == 200
assert len(res.json) == 0
@pytest.mark.bdb
def test_get_blocks_by_txid_endpoint_returns_400_bad_query_params(client):
res = client.get(BLOCKS_ENDPOINT)
assert res.status_code == 400
res = client.get(BLOCKS_ENDPOINT + '?ts_id=123')
assert res.status_code == 400
assert res.json == {
'message': {
'transaction_id': 'Missing required parameter in the JSON body or the post body or the query string'
}
}
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123&foo=123')
assert res.status_code == 400
assert res.json == {
'message': 'Unknown arguments: foo'
}
res = client.get(BLOCKS_ENDPOINT + '?transaction_id=123&status=123')
assert res.status_code == 400
assert res.json == {
'message': {
'status': '123 is not a valid choice'
}
}
| gpl-3.0 |
javilonas/NCam | cross/android-toolchain/lib/python2.7/hmac.py | 253 | 4531 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)])
trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)])
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
# A unique object passed by HMAC.copy() to the HMAC constructor, in order
# that the latter return very quickly. HMAC("") in contrast is quite
# expensive.
_secret_backdoor_key = []
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object.
Defaults to hashlib.md5.
"""
if key is _secret_backdoor_key: # cheap
return
if digestmod is None:
import hashlib
digestmod = hashlib.md5
if hasattr(digestmod, '__call__'):
self.digest_cons = digestmod
else:
self.digest_cons = lambda d='': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
# Very low blocksize, most likely a legacy value like
# Lib/sha.py and Lib/md5.py have.
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = self.__class__(_secret_backdoor_key)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
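# Usage sketch:
#   import hmac, hashlib
#   mac = hmac.new('secret-key', 'message', hashlib.sha1)
#   mac.update(' and some more message')
#   signature = mac.hexdigest()
# When digestmod is omitted, hashlib.md5 is used (see HMAC.__init__ above).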
| gpl-3.0 |
shawnadelic/shuup | shuup/customer_group_pricing/admin_form_part.py | 2 | 4175 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from shuup.admin.form_part import FormPart, TemplatedFormDef
from shuup.core.models import ContactGroup, Shop
from shuup.customer_group_pricing.models import CgpPrice
class CustomerGroupPricingForm(forms.Form):
def __init__(self, **kwargs):
self.product = kwargs.pop("product", None)
super(CustomerGroupPricingForm, self).__init__(**kwargs)
self.shops = []
self.groups = []
if self.product:
self._build_fields()
def _build_fields(self):
self.shops = list(Shop.objects.all())
self.groups = list(ContactGroup.objects.filter(
Q(show_pricing=True) |
Q(
id__in=CgpPrice.objects.filter(product=self.product)
.values_list("group_id", flat=True).distinct()
)
))
prices_by_shop_and_group = dict(
((shop_id or 0, group_id or 0), price)
for (shop_id, group_id, price)
in CgpPrice.objects.filter(product=self.product)
.values_list("shop_id", "group_id", "price_value")
)
for group in self.groups:
for shop in self.shops:
shop_group_id_tuple = self._get_id_tuple(shop, group)
name = self._get_field_name(shop_group_id_tuple)
price = prices_by_shop_and_group.get(shop_group_id_tuple)
price_field = forms.DecimalField(
min_value=0, initial=price,
label=(_("Price (%(shop)s/%(group)s)") %
{"shop": shop, "group": group}),
required=False
)
self.fields[name] = price_field
def _get_id_tuple(self, shop, group):
return (
shop.id if shop else 0,
group.id if group else 0
)
def _get_field_name(self, id_tuple):
return "s_%d_g_%d" % id_tuple
def _process_single_save(self, shop, group):
shop_group_id_tuple = self._get_id_tuple(shop, group)
name = self._get_field_name(shop_group_id_tuple)
value = self.cleaned_data.get(name)
clear = (value is None or value < 0)
if clear:
CgpPrice.objects.filter(product=self.product, group=group, shop=shop).delete()
else:
(spp, created) = CgpPrice.objects.get_or_create(
product=self.product, group=group, shop=shop,
defaults={'price_value': value})
if not created:
spp.price_value = value
spp.save()
def save(self):
if not self.has_changed(): # No changes, so no need to do anything.
# (This is required because `.full_clean()` would create an empty `.cleaned_data`,
# but short-circuits out if `has_changed()` returns false.
# That, in kind, would cause `self.cleaned_data.get(name)` in `_process_single_save`
# to return Nones, clearing out all prices. Oops.)
return
for group in self.groups:
for shop in self.shops:
self._process_single_save(shop, group)
def get_shop_group_field(self, shop, group):
shop_group_id_tuple = self._get_id_tuple(shop, group)
name = self._get_field_name(shop_group_id_tuple)
return self[name]
class CustomerGroupPricingFormPart(FormPart):
priority = 10
def get_form_defs(self):
yield TemplatedFormDef(
name="customer_group_pricing",
form_class=CustomerGroupPricingForm,
template_name="shuup/admin/customer_group_pricing/form_part.jinja",
required=False,
kwargs={"product": self.object}
)
def form_valid(self, form):
form["customer_group_pricing"].save()
| agpl-3.0 |
tejasnikumbh/ThesisCode | lib/python2.7/site-packages/numpy/core/tests/test_arrayprint.py | 69 | 6858 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import *
from numpy.compat import sixu
class TestArrayRepr(object):
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([ nan, inf])')
class TestComplexArray(TestCase):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
dtypes = [np.complex64, np.cdouble, np.clongdouble]
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
wanted = [
'[ 0.+0.j]', '[ 0.+0.j]', '[ 0.0+0.0j]',
'[ 0.+1.j]', '[ 0.+1.j]', '[ 0.0+1.0j]',
'[ 0.-1.j]', '[ 0.-1.j]', '[ 0.0-1.0j]',
'[ 0.+infj]', '[ 0.+infj]', '[ 0.0+infj]',
'[ 0.-infj]', '[ 0.-infj]', '[ 0.0-infj]',
'[ 0.+nanj]', '[ 0.+nanj]', '[ 0.0+nanj]',
'[ 1.+0.j]', '[ 1.+0.j]', '[ 1.0+0.0j]',
'[ 1.+1.j]', '[ 1.+1.j]', '[ 1.0+1.0j]',
'[ 1.-1.j]', '[ 1.-1.j]', '[ 1.0-1.0j]',
'[ 1.+infj]', '[ 1.+infj]', '[ 1.0+infj]',
'[ 1.-infj]', '[ 1.-infj]', '[ 1.0-infj]',
'[ 1.+nanj]', '[ 1.+nanj]', '[ 1.0+nanj]',
'[-1.+0.j]', '[-1.+0.j]', '[-1.0+0.0j]',
'[-1.+1.j]', '[-1.+1.j]', '[-1.0+1.0j]',
'[-1.-1.j]', '[-1.-1.j]', '[-1.0-1.0j]',
'[-1.+infj]', '[-1.+infj]', '[-1.0+infj]',
'[-1.-infj]', '[-1.-infj]', '[-1.0-infj]',
'[-1.+nanj]', '[-1.+nanj]', '[-1.0+nanj]',
'[ inf+0.j]', '[ inf+0.j]', '[ inf+0.0j]',
'[ inf+1.j]', '[ inf+1.j]', '[ inf+1.0j]',
'[ inf-1.j]', '[ inf-1.j]', '[ inf-1.0j]',
'[ inf+infj]', '[ inf+infj]', '[ inf+infj]',
'[ inf-infj]', '[ inf-infj]', '[ inf-infj]',
'[ inf+nanj]', '[ inf+nanj]', '[ inf+nanj]',
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.0j]',
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.0j]',
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.0j]',
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
'[ nan+0.j]', '[ nan+0.j]', '[ nan+0.0j]',
'[ nan+1.j]', '[ nan+1.j]', '[ nan+1.0j]',
'[ nan-1.j]', '[ nan-1.j]', '[ nan-1.0j]',
'[ nan+infj]', '[ nan+infj]', '[ nan+infj]',
'[ nan-infj]', '[ nan-infj]', '[ nan-infj]',
'[ nan+nanj]', '[ nan+nanj]', '[ nan+nanj]']
for res, val in zip(actual, wanted):
assert_(res == val)
class TestArray2String(TestCase):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')
def test_style_keyword(self):
"""This should only apply to 0-D arrays. See #1218."""
stylestr = np.array2string(np.array(1.5),
style=lambda x: "Value in 0-D array: " + str(x))
assert_(stylestr == 'Value in 0-D array: 1.5')
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
if np.abs(x) < 1:
return '.'
elif np.abs(x) < 2:
return 'o'
else:
return 'O'
x = np.arange(3)
if sys.version_info[0] >= 3:
x_hex = "[0x0 0x1 0x2]"
x_oct = "[0o0 0o1 0o2]"
else:
x_hex = "[0x0L 0x1L 0x2L]"
x_oct = "[0L 01L 02L]"
assert_(np.array2string(x, formatter={'all':_format_function}) == \
"[. o O]")
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==\
"[. o O]")
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) == \
"[0.0000 1.0000 2.0000]")
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}), \
x_hex)
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}), \
x_oct)
x = np.arange(3.)
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) == \
"[0.00 1.00 2.00]")
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) == \
"[0.00 1.00 2.00]")
s = np.array(['abc', 'def'])
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) == \
'[abcabc defdef]')
class TestPrintOptions:
"""Test getting and setting global print options."""
def setUp(self):
self.oldopts = np.get_printoptions()
def tearDown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
x = np.array([1.5, 0, 1.234567890])
assert_equal(repr(x), "array([ 1.5 , 0. , 1.23456789])")
np.set_printoptions(precision=4)
assert_equal(repr(x), "array([ 1.5 , 0. , 1.2346])")
def test_formatter(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
def test_formatter_reset(self):
x = np.arange(3)
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'all':None})
assert_equal(repr(x), "array([0, 1, 2])")
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1, 0, 1])")
np.set_printoptions(formatter={'int_kind':None})
assert_equal(repr(x), "array([0, 1, 2])")
x = np.arange(3.)
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([ 0., 1., 2.])")
def test_unicode_object_array():
import sys
if sys.version_info[0] >= 3:
expected = "array(['é'], dtype=object)"
else:
expected = "array([u'\\xe9'], dtype=object)"
x = np.array([sixu('\xe9')], dtype=object)
assert_equal(repr(x), expected)
if __name__ == "__main__":
run_module_suite()
| mit |
jyr/opentumblr | simplejson/decoder.py | 317 | 12404 | """Implementation of JSONDecoder
"""
import re
import sys
import struct
from simplejson.scanner import make_scanner
try:
from simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
#fmt = '{0}: line {1} column {2} (char {3})'
#return fmt.format(msg, lineno, colno, pos)
fmt = '%s: line %d column %d (char %d)'
return fmt % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
#fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
#return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at" % (terminator,)
#msg = "Invalid control character {0!r} at".format(terminator)
raise ValueError(errmsg(msg, s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\escape: " + repr(esc)
raise ValueError(errmsg(msg, s, end))
end += 1
else:
# Unicode escape sequence
esc = s[end + 1:end + 5]
next_end = end + 5
if len(esc) != 4:
msg = "Invalid \\uXXXX escape"
raise ValueError(errmsg(msg, s, end))
uni = int(esc, 16)
# Check for surrogate pair on UCS-4 systems
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError(errmsg(msg, s, end))
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError(errmsg(msg, s, end))
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
char = unichr(uni)
end = next_end
# Append the unescaped character
_append(char)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = {}
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
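# Usage sketch: raw_decode() is handy when a JSON document is followed by extra data.
#   decoder = JSONDecoder()
#   obj, end = decoder.raw_decode('{"a": 1} trailing text')
#   # obj == {'a': 1} (keys come back as unicode), end == 8
# decode() on the same input would raise ValueError("Extra data: ...").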
| mit |
metacloud/python-keystoneclient | keystoneclient/tests/v3/test_projects.py | 3 | 2261 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import httpretty
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import projects
class ProjectTests(utils.TestCase, utils.CrudTests):
def setUp(self):
super(ProjectTests, self).setUp()
self.key = 'project'
self.collection_key = 'projects'
self.model = projects.Project
self.manager = self.client.projects
def new_ref(self, **kwargs):
kwargs = super(ProjectTests, self).new_ref(**kwargs)
kwargs.setdefault('domain_id', uuid.uuid4().hex)
kwargs.setdefault('enabled', True)
kwargs.setdefault('name', uuid.uuid4().hex)
return kwargs
@httpretty.activate
def test_list_projects_for_user(self):
ref_list = [self.new_ref(), self.new_ref()]
user_id = uuid.uuid4().hex
self.stub_entity(httpretty.GET,
['users', user_id, self.collection_key],
entity=ref_list)
returned_list = self.manager.list(user=user_id)
self.assertEqual(len(ref_list), len(returned_list))
[self.assertIsInstance(r, self.model) for r in returned_list]
@httpretty.activate
def test_list_projects_for_domain(self):
ref_list = [self.new_ref(), self.new_ref()]
domain_id = uuid.uuid4().hex
self.stub_entity(httpretty.GET, [self.collection_key],
entity=ref_list)
returned_list = self.manager.list(domain=domain_id)
self.assertEqual(len(ref_list), len(returned_list))
[self.assertIsInstance(r, self.model) for r in returned_list]
self.assertEqual(httpretty.last_request().querystring,
{'domain_id': [domain_id]})
| apache-2.0 |
aYukiSekiguchi/ACCESS-Chromium | tools/valgrind/asan/chrome_tests.py | 1 | 17088 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through asan_test.py.
Most of this code is copied from ../valgrind/chrome_tests.py.
TODO(glider): put common functions to a standalone module.
'''
import glob
import logging
import optparse
import os
import stat
import sys
import logging_utils
import path_utils
import common
import asan_test
class TestNotFound(Exception): pass
def Dir2IsNewer(dir1, dir2):
if dir2 is None or not os.path.isdir(dir2):
return False
if dir1 is None or not os.path.isdir(dir1):
return True
return os.stat(dir2)[stat.ST_MTIME] > os.stat(dir1)[stat.ST_MTIME]
def FindNewestDir(dirs):
newest_dir = None
for dir in dirs:
if Dir2IsNewer(newest_dir, dir):
newest_dir = dir
return newest_dir
def File2IsNewer(file1, file2):
if file2 is None or not os.path.isfile(file2):
return False
if file1 is None or not os.path.isfile(file1):
return True
return os.stat(file2)[stat.ST_MTIME] > os.stat(file1)[stat.ST_MTIME]
def FindDirContainingNewestFile(dirs, file):
"""Searches for the directory containing the newest copy of |file|.
Args:
dirs: A list of paths to the directories to search among.
file: A string containing the file name to search.
Returns:
    The string representing the directory containing the newest copy of
|file|.
Raises:
IOError: |file| was not found.
"""
newest_dir = None
newest_file = None
for dir in dirs:
the_file = os.path.join(dir, file)
if File2IsNewer(newest_file, the_file):
newest_dir = dir
newest_file = the_file
if newest_dir is None:
raise IOError("cannot find file %s anywhere, have you built it?" % file)
return newest_dir
class ChromeTests(object):
'''This class is derived from the chrome_tests.py file in ../purify/.
'''
def __init__(self, options, args, test):
# The known list of tests.
# Recognise the original abbreviations as well as full executable names.
self._test_list = {
"base": self.TestBase, "base_unittests": self.TestBase,
"browser": self.TestBrowser, "browser_tests": self.TestBrowser,
"crypto": self.TestCrypto, "crypto_unittests": self.TestCrypto,
"googleurl": self.TestGURL, "googleurl_unittests": self.TestGURL,
"content": self.TestContent, "content_unittests": self.TestContent,
"courgette": self.TestCourgette,
"courgette_unittests": self.TestCourgette,
"ipc": self.TestIpc, "ipc_tests": self.TestIpc,
"layout": self.TestLayout, "layout_tests": self.TestLayout,
"media": self.TestMedia, "media_unittests": self.TestMedia,
"net": self.TestNet, "net_unittests": self.TestNet,
"printing": self.TestPrinting, "printing_unittests": self.TestPrinting,
"remoting": self.TestRemoting, "remoting_unittests": self.TestRemoting,
"startup": self.TestStartup, "startup_tests": self.TestStartup,
"sync": self.TestSync, "sync_unit_tests": self.TestSync,
"test_shell": self.TestTestShell, "test_shell_tests": self.TestTestShell,
"ui": self.TestUI, "ui_tests": self.TestUI,
"unit": self.TestUnit, "unit_tests": self.TestUnit,
"views": self.TestViews, "views_unittests": self.TestViews,
"sql": self.TestSql, "sql_unittests": self.TestSql,
"ui_unit": self.TestUIUnit, "ui_unittests": self.TestUIUnit,
"gfx": self.TestGfx, "gfx_unittests": self.TestGfx,
}
if test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
self._options = options
self._args = args
self._test = test
script_dir = path_utils.ScriptDir()
# Compute the top of the tree (the "source dir") from the script dir (where
# this script lives). We assume that the script dir is in tools/asan/
# relative to the top of the tree.
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# Since this path is used for string matching, make sure it's always
# an absolute Unix-style path.
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
asan_test_script = os.path.join(script_dir, "asan_test.py")
self._command_preamble = [asan_test_script]
def _DefaultCommand(self, module, exe=None, asan_test_args=None):
'''Generates the default command array that most tests will use.
Args:
module: The module name (corresponds to the dir in src/ where the test
data resides).
exe: The executable name.
asan_test_args: additional arguments to append to the command line.
Returns:
A string with the command to run the test.
'''
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
]
if exe:
self._options.build_dir = FindDirContainingNewestFile(dirs, exe)
else:
self._options.build_dir = FindNewestDir(dirs)
cmd = list(self._command_preamble)
if asan_test_args != None:
for arg in asan_test_args:
cmd.append(arg)
if exe:
cmd.append(os.path.join(self._options.build_dir, exe))
    # Show elapsed time so we can find the slowpokes.
cmd.append("--gtest_print_time")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
return cmd
def Suppressions(self):
'''Builds the list of available suppressions files.'''
ret = []
directory = path_utils.ScriptDir()
suppression_file = os.path.join(directory, "suppressions.txt")
if os.path.exists(suppression_file):
ret.append(suppression_file)
suppression_file = os.path.join(directory, "suppressions_linux.txt")
if os.path.exists(suppression_file):
ret.append(suppression_file)
return ret
def Run(self):
'''Runs the test specified by command-line argument --test.'''
logging.info("running test %s" % (self._test))
return self._test_list[self._test]()
def _ReadGtestFilterFile(self, name, cmd):
'''Reads files which contain lists of tests to filter out with
--gtest_filter and appends the command-line option to |cmd|.
Args:
name: the test executable name.
cmd: the test running command line to be modified.
'''
filters = []
directory = path_utils.ScriptDir()
gtest_filter_files = [
os.path.join(directory, name + ".gtest-asan.txt"),
# TODO(glider): Linux vs. CrOS?
]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
filters.append(line)
gtest_filter = self._options.gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
def SimpleTest(self, module, name, asan_test_args=None, cmd_args=None):
'''Builds the command line and runs the specified test.
Args:
module: The module name (corresponds to the dir in src/ where the test
data resides).
name: The executable name.
asan_test_args: Additional command line args for asan.
cmd_args: Additional command line args for the test.
'''
cmd = self._DefaultCommand(module, name, asan_test_args)
supp = self.Suppressions()
self._ReadGtestFilterFile(name, cmd)
if cmd_args:
cmd.extend(["--"])
cmd.extend(cmd_args)
# Sets LD_LIBRARY_PATH to the build folder so external libraries can be
# loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
return asan_test.RunTool(cmd, supp, module)
def TestBase(self):
return self.SimpleTest("base", "base_unittests")
def TestBrowser(self):
return self.SimpleTest("chrome", "browser_tests")
def TestCrypto(self):
return self.SimpleTest("crypto", "crypto_unittests")
def TestGURL(self):
return self.SimpleTest("chrome", "googleurl_unittests")
def TestContent(self):
return self.SimpleTest("content", "content_unittests")
def TestCourgette(self):
return self.SimpleTest("courgette", "courgette_unittests")
def TestMedia(self):
return self.SimpleTest("chrome", "media_unittests")
def TestPrinting(self):
return self.SimpleTest("chrome", "printing_unittests")
def TestRemoting(self):
return self.SimpleTest("chrome", "remoting_unittests")
def TestSync(self):
return self.SimpleTest("chrome", "sync_unit_tests")
def TestIpc(self):
return self.SimpleTest("ipc", "ipc_tests")
def TestNet(self):
return self.SimpleTest("net", "net_unittests")
def TestStartup(self):
# We don't need the performance results, we're just looking for pointer
# errors, so set number of iterations down to the minimum.
os.putenv("STARTUP_TESTS_NUMCYCLES", "1")
logging.info("export STARTUP_TESTS_NUMCYCLES=1");
return self.SimpleTest("chrome", "startup_tests")
def TestTestShell(self):
return self.SimpleTest("webkit", "test_shell_tests")
def TestUnit(self):
return self.SimpleTest("chrome", "unit_tests")
def TestViews(self):
return self.SimpleTest("views", "views_unittests")
def TestSql(self):
return self.SimpleTest("chrome", "sql_unittests")
def TestUIUnit(self):
return self.SimpleTest("chrome", "ui_unittests")
def TestGfx(self):
return self.SimpleTest("chrome", "gfx_unittests")
def TestUI(self):
return self.SimpleTest("chrome", "ui_tests",
cmd_args=[
"--ui-test-action-timeout=80000",
"--ui-test-action-max-timeout=180000"])
def TestLayoutChunk(self, chunk_num, chunk_size):
'''Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size).
Wrap around to beginning of list at end. If chunk_size is zero, run all
tests in the list once. If a text file is given as argument, it is used as
the list of tests.
'''
# Build the ginormous commandline in 'cmd'.
# It's going to be roughly
# python asan_test.py ... python run_webkit_tests.py ...
# but we'll use the --indirect flag to asan_test.py
# to avoid asaning python.
# Start by building the asan_test.py commandline.
cmd = self._DefaultCommand("webkit")
# Now build script_cmd, the run_webkits_tests.py commandline
# Store each chunk in its own directory so that we can find the data later
chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
test_shell = os.path.join(self._options.build_dir, "test_shell")
out_dir = os.path.join(path_utils.ScriptDir(), "latest")
out_dir = os.path.join(out_dir, chunk_dir)
if os.path.exists(out_dir):
old_files = glob.glob(os.path.join(out_dir, "*.txt"))
for f in old_files:
os.remove(f)
else:
os.makedirs(out_dir)
script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
"run_webkit_tests.py")
script_cmd = ["python", script, "--run-singly", "-v",
"--noshow-results", "--time-out-ms=200000",
"--nocheck-sys-deps"]
# Pass build mode to run_webkit_tests.py. We aren't passed it directly,
# so parse it out of build_dir. run_webkit_tests.py can only handle
# the two values "Release" and "Debug".
# TODO(Hercules): unify how all our scripts pass around build mode
# (--mode / --target / --build_dir / --debug)
if self._options.build_dir.endswith("Debug"):
script_cmd.append("--debug");
if (chunk_size > 0):
script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
if len(self._args):
# if the arg is a txt file, then treat it as a list of tests
if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
script_cmd.append("--test-list=%s" % self._args[0])
else:
script_cmd.extend(self._args)
self._ReadGtestFilterFile("layout", script_cmd)
# Now run script_cmd with the wrapper in cmd
cmd.extend(["--"])
cmd.extend(script_cmd)
supp = self.Suppressions()
return asan_test.RunTool(cmd, supp, "layout")
def TestLayout(self):
'''Runs the layout tests.'''
# A "chunk file" is maintained in the local directory so that each test
# runs a slice of the layout tests of size chunk_size that increments with
# each run. Since tests can be added and removed from the layout tests at
# any time, this is not going to give exact coverage, but it will allow us
# to continuously run small slices of the layout tests under purify rather
# than having to run all of them in one shot.
chunk_size = self._options.num_tests
if (chunk_size == 0):
return self.TestLayoutChunk(0, 0)
chunk_num = 0
chunk_file = os.path.join("asan_layout_chunk.txt")
logging.info("Reading state from " + chunk_file)
try:
f = open(chunk_file)
if f:
        contents = f.read()
        if len(contents):
          chunk_num = int(contents)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we loop
        # we are almost guaranteed not to be at the end of the test list)
if chunk_num > 10000:
chunk_num = 0
f.close()
except IOError, (errno, strerror):
logging.error("error reading from file %s (%d, %s)" % (chunk_file,
errno, strerror))
ret = self.TestLayoutChunk(chunk_num, chunk_size)
# Wait until after the test runs to completion to write out the new chunk
# number. This way, if the bot is killed, we'll start running again from
# the current chunk rather than skipping it.
logging.info("Saving state to " + chunk_file)
try:
f = open(chunk_file, "w")
chunk_num += 1
f.write("%d" % chunk_num)
f.close()
except IOError, (errno, strerror):
logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
strerror))
# Since we're running small chunks of the layout tests, it's important to
# mark the ones that have errors in them. These won't be visible in the
# summary list for long, but will be useful for someone reviewing this bot.
return ret
def main():
if not sys.platform.startswith(('linux', 'darwin')):
logging.error("AddressSanitizer works only on Linux and Mac OS "
"at the moment.")
return 1
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.disable_interspersed_args()
parser.add_option("-b", "--build_dir",
help="the location of the output of the compiler output")
parser.add_option("-t", "--test", action="append",
help="which test to run")
parser.add_option("", "--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("", "--gtest_repeat",
help="argument for --gtest_repeat")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
# My machine can do about 120 layout tests/hour in release mode.
# Let's do 30 minutes worth per run.
# The CPU is mostly idle, so perhaps we can raise this when
# we figure out how to run them more efficiently.
parser.add_option("-n", "--num_tests", default=60, type="int",
help="for layout tests: # of subtests per run. 0 for all.")
options, args = parser.parse_args()
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if not options.test or not len(options.test):
parser.error("--test not specified")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret:
return ret
return 0
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
tangjonathan/HKQuiz | node_modules/pryjs/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/filters/__init__.py | 59 | 11588 | # -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError, text_type, string_types
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""
Lookup a filter by name. Return None if not found.
"""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""
Return an instantiated filter. Options are passed to the filter
initializer if wanted. Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""
Return a generator of all filter names.
"""
for name in FILTERS:
yield name
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
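# Illustrative example: with CodeTagFilter's default regex below, a Comment
# token "# TODO: fix this" is split by _replace_special() into
# (Comment, "# "), (Comment.Special, "TODO"), (Comment, ": fix this").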
class CodeTagFilter(Filter):
"""
Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
class KeywordCaseFilter(Filter):
"""
Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
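        # getattr(text_type, case) binds the chosen casing method of the text
        # type (e.g. 'upper' -> text_type.upper), so filter() below only has to
        # call self.convert(value) on keyword tokens.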
self.convert = getattr(text_type, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""
Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""
Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""
Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
.. versionadded:: 0.8
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in [('spaces', u'·'),
('tabs', u'»'),
('newlines', u'¶')]:
opt = options.get(name, False)
if isinstance(opt, string_types) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' '*(tabsize-1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or ' '
tabs = self.tabs or '\t'
newlines = self.newlines or '\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
class GobbleFilter(Filter):
"""
Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return '', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = '\n'.join(parts)
if value != '':
yield ttype, value
class TokenMergeFilter(Filter):
"""
Merges consecutive tokens with the same token type in the output stream of a
lexer.
.. versionadded:: 1.2
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
}
| mit |
marc-sensenich/ansible | lib/ansible/modules/network/fortios/fortios_firewall_vip46.py | 7 | 15899 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; its output can be captured if logging is
# enabled in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_vip46
short_description: Configure IPv4 to IPv6 virtual IPs in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
      allowing the user to configure the firewall feature and vip46 category.
      Examples include all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
            - FortiOS or FortiGate IP address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_vip46:
description:
- Configure IPv4 to IPv6 virtual IPs.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
arp-reply:
description:
- Enable ARP reply.
choices:
- disable
- enable
color:
description:
- Color of icon on the GUI.
comment:
description:
- Comment.
extip:
description:
- Start-external-IP [-end-external-IP].
extport:
description:
- External service port.
id:
description:
- Custom defined id.
ldb-method:
description:
- Load balance method.
choices:
- static
- round-robin
- weighted
- least-session
- least-rtt
- first-alive
mappedip:
description:
- Start-mapped-IP [-end mapped-IP].
mappedport:
description:
- Mapped service port.
monitor:
description:
- Health monitors.
suboptions:
name:
description:
- Health monitor name. Source firewall.ldb-monitor.name.
required: true
name:
description:
- VIP46 name.
required: true
portforward:
description:
- Enable port forwarding.
choices:
- disable
- enable
protocol:
description:
- Mapped port protocol.
choices:
- tcp
- udp
realservers:
description:
- Real servers.
suboptions:
client-ip:
description:
- Restrict server to a client IP in this range.
healthcheck:
description:
- Per server health check.
choices:
- disable
- enable
- vip
holddown-interval:
description:
- Hold down interval.
id:
description:
- Real server ID.
required: true
ip:
description:
- Mapped server IPv6.
max-connections:
description:
- Maximum number of connections allowed to server.
monitor:
description:
- Health monitors. Source firewall.ldb-monitor.name.
port:
description:
- Mapped server port.
status:
description:
- Server administrative status.
choices:
- active
- standby
- disable
weight:
description:
- weight
server-type:
description:
- Server type.
choices:
- http
- tcp
- udp
- ip
src-filter:
description:
- Source IP filter (x.x.x.x/x).
suboptions:
range:
description:
- Src-filter range.
required: true
type:
description:
- "VIP type: static NAT or server load balance."
choices:
- static-nat
- server-load-balance
uuid:
description:
- Universally Unique Identifier (UUID; automatically assigned but can be manually reset).
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv4 to IPv6 virtual IPs.
fortios_firewall_vip46:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_vip46:
state: "present"
arp-reply: "disable"
color: "4"
comment: "Comment."
extip: "<your_own_value>"
extport: "<your_own_value>"
id: "8"
ldb-method: "static"
mappedip: "<your_own_value>"
mappedport: "<your_own_value>"
monitor:
-
name: "default_name_13 (source firewall.ldb-monitor.name)"
name: "default_name_14"
portforward: "disable"
protocol: "tcp"
realservers:
-
client-ip: "<your_own_value>"
healthcheck: "disable"
holddown-interval: "20"
id: "21"
ip: "<your_own_value>"
max-connections: "23"
monitor: "<your_own_value> (source firewall.ldb-monitor.name)"
port: "25"
status: "active"
weight: "27"
server-type: "http"
src-filter:
-
range: "<your_own_value>"
type: "static-nat"
uuid: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_firewall_vip46_data(json):
option_list = ['arp-reply', 'color', 'comment',
'extip', 'extport', 'id',
'ldb-method', 'mappedip', 'mappedport',
'monitor', 'name', 'portforward',
'protocol', 'realservers', 'server-type',
'src-filter', 'type', 'uuid']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def firewall_vip46(data, fos):
vdom = data['vdom']
firewall_vip46_data = data['firewall_vip46']
filtered_data = filter_firewall_vip46_data(firewall_vip46_data)
if firewall_vip46_data['state'] == "present":
return fos.set('firewall',
'vip46',
data=filtered_data,
vdom=vdom)
elif firewall_vip46_data['state'] == "absent":
return fos.delete('firewall',
'vip46',
mkey=filtered_data['name'],
vdom=vdom)
def fortios_firewall(data, fos):
login(data)
methodlist = ['firewall_vip46']
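    # Dispatch to the handler named in methodlist (here only firewall_vip46)
    # for whichever configuration section was supplied to the module.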
for method in methodlist:
if data[method]:
resp = eval(method)(data, fos)
break
fos.logout()
return not resp['status'] == "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"firewall_vip46": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"arp-reply": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"color": {"required": False, "type": "int"},
"comment": {"required": False, "type": "str"},
"extip": {"required": False, "type": "str"},
"extport": {"required": False, "type": "str"},
"id": {"required": False, "type": "int"},
"ldb-method": {"required": False, "type": "str",
"choices": ["static", "round-robin", "weighted",
"least-session", "least-rtt", "first-alive"]},
"mappedip": {"required": False, "type": "str"},
"mappedport": {"required": False, "type": "str"},
"monitor": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"name": {"required": True, "type": "str"},
"portforward": {"required": False, "type": "str",
"choices": ["disable", "enable"]},
"protocol": {"required": False, "type": "str",
"choices": ["tcp", "udp"]},
"realservers": {"required": False, "type": "list",
"options": {
"client-ip": {"required": False, "type": "str"},
"healthcheck": {"required": False, "type": "str",
"choices": ["disable", "enable", "vip"]},
"holddown-interval": {"required": False, "type": "int"},
"id": {"required": True, "type": "int"},
"ip": {"required": False, "type": "str"},
"max-connections": {"required": False, "type": "int"},
"monitor": {"required": False, "type": "str"},
"port": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["active", "standby", "disable"]},
"weight": {"required": False, "type": "int"}
}},
"server-type": {"required": False, "type": "str",
"choices": ["http", "tcp", "udp",
"ip"]},
"src-filter": {"required": False, "type": "list",
"options": {
"range": {"required": True, "type": "str"}
}},
"type": {"required": False, "type": "str",
"choices": ["static-nat", "server-load-balance"]},
"uuid": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_firewall(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
naousse/odoo | openerp/report/int_to_text.py | 442 | 2641 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
unites = {
0: '', 1:'un', 2:'deux', 3:'trois', 4:'quatre', 5:'cinq', 6:'six', 7:'sept', 8:'huit', 9:'neuf',
10:'dix', 11:'onze', 12:'douze', 13:'treize', 14:'quatorze', 15:'quinze', 16:'seize',
21:'vingt et un', 31:'trente et un', 41:'quarante et un', 51:'cinquante et un', 61:'soixante et un',
71:'septante et un', 91:'nonante et un', 80:'quatre-vingts'
}
dizaine = {
1: 'dix', 2:'vingt', 3:'trente',4:'quarante', 5:'cinquante', 6:'soixante', 7:'septante', 8:'quatre-vingt', 9:'nonante'
}
centaine = {
0:'', 1: 'cent', 2:'deux cent', 3:'trois cent',4:'quatre cent', 5:'cinq cent', 6:'six cent', 7:'sept cent', 8:'huit cent', 9:'neuf cent'
}
mille = {
0:'', 1:'mille'
}
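# The conversion decomposes the number: _100_to_text() spells 0-99 from the
# unites/dizaine tables, _1000_to_text() adds the hundreds and
# _10000_to_text() the thousands, e.g. 1789 -> "mille sept cent
# quatre-vingt-neuf".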
def _100_to_text(chiffre):
if chiffre in unites:
return unites[chiffre]
else:
if chiffre%10>0:
return dizaine[chiffre / 10]+'-'+unites[chiffre % 10]
else:
return dizaine[chiffre / 10]
def _1000_to_text(chiffre):
d = _100_to_text(chiffre % 100)
d2 = chiffre/100
if d2>0 and d:
return centaine[d2]+' '+d
elif d2>1 and not d:
return centaine[d2]+'s'
else:
return centaine[d2] or d
def _10000_to_text(chiffre):
if chiffre==0:
return 'zero'
part1 = _1000_to_text(chiffre % 1000)
part2 = mille.get(chiffre / 1000, _1000_to_text(chiffre / 1000)+' mille')
if part2 and part1:
part1 = ' '+part1
return part2+part1
def int_to_text(i):
return _10000_to_text(i)
if __name__=='__main__':
for i in range(1,999999,139):
print int_to_text(i)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tboyce021/home-assistant | tests/components/calendar/test_init.py | 21 | 1455 | """The tests for the calendar component."""
from datetime import timedelta
from homeassistant.bootstrap import async_setup_component
import homeassistant.util.dt as dt_util
async def test_events_http_api(hass, hass_client):
"""Test the calendar demo view."""
await async_setup_component(hass, "calendar", {"calendar": {"platform": "demo"}})
await hass.async_block_till_done()
client = await hass_client()
response = await client.get("/api/calendars/calendar.calendar_2")
assert response.status == 400
start = dt_util.now()
end = start + timedelta(days=1)
response = await client.get(
"/api/calendars/calendar.calendar_1?start={}&end={}".format(
start.isoformat(), end.isoformat()
)
)
assert response.status == 200
events = await response.json()
assert events[0]["summary"] == "Future Event"
assert events[0]["title"] == "Future Event"
async def test_calendars_http_api(hass, hass_client):
"""Test the calendar demo view."""
await async_setup_component(hass, "calendar", {"calendar": {"platform": "demo"}})
await hass.async_block_till_done()
client = await hass_client()
response = await client.get("/api/calendars")
assert response.status == 200
data = await response.json()
assert data == [
{"entity_id": "calendar.calendar_1", "name": "Calendar 1"},
{"entity_id": "calendar.calendar_2", "name": "Calendar 2"},
]
| apache-2.0 |
nuclear-wizard/moose | python/mooseutils/VectorPostprocessorReader.py | 6 | 5970 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import glob
import pandas
import bisect
from .MooseDataFrame import MooseDataFrame
from . import message
class VectorPostprocessorReader(object):
"""
A Reader for MOOSE VectorPostprocessor data.
Args:
pattern[str]: A pattern of files (for use with glob) for loading.
    MOOSE outputs VectorPostprocessor data in separate files for each timestep, using the timestep as
    a suffix. For example: file_000.csv, file_001.csv, etc.
Therefore, a pattern acceptable for use with the python glob package must be supplied. For the
above files, "file_*.csv" should be supplied.
This object manages the loading and unloading of data and should always be in a valid state,
regardless of the existence of a file. It will also append new data and remove old/deleted data
on subsequent calls to "update()".
"""
def __init__(self, pattern, run_start_time=0):
self._pattern = pattern
self._timedata = MooseDataFrame(self._pattern.replace('*', 'time'),
run_start_time=None,
index='timestep')
self._frames = dict()
self._time = -1
self._index = None
self._run_start_time = run_start_time
self.update()
@property
def data(self):
return self._frames.get(self._index, pandas.DataFrame())
@property
def filename(self):
if self._frames:
return self._frames[self._index].filename
def __getitem__(self, keys):
"""
Operator[] returns the data for the current time.
Args:
keys[str|list]: The key(s) to return.
"""
return self._frames[self._index][keys]
def __bool__(self):
"""
Allows this object to be used in boolean cases.
Example:
data = VectorPostprocessorReader('files_*.csv')
if not data:
                print('No data found!')
"""
return self._index in self._frames
def __contains__(self, variable):
"""
Returns true if the variable exists in the data structure.
"""
return variable in self._frames[self._index]
def times(self):
"""
Returns the list of available time indices contained in the data.
"""
return sorted(self._frames.keys())
def clear(self):
"""
Remove all data.
"""
self._frames = dict()
self._index = None
self._time = None
def variables(self):
"""
Return a list of postprocessor variable names listed in the reader.
"""
if self._index is not None:
return self._frames[self._index].data.columns.tolist()
def update(self, time=None):
"""
Update data by adding/removing files.
time[float]: The time at which the data should be returned.
"""
# Update the time
if time is not None:
self._time = time
# Update the time data file
self._timedata.update()
# The list of files from the supplied pattern
last_modified = 0.0
self._frames = dict()
for fname in sorted(glob.glob(self._pattern)):
if fname.endswith('LATEST') or fname.endswith('FINAL') or (fname == self._timedata.filename):
continue
idx = self._timeHelper(fname)
mdf = self._frames.get(idx, None)
if mdf is None:
mdf = MooseDataFrame(fname, run_start_time=self._run_start_time, update=False,
peacock_index=True)
self._frames[idx] = mdf
if (mdf.modified < last_modified):
self._frames.pop(idx)
elif mdf.filesize == 0:
self._frames.pop(idx)
else:
last_modified = mdf.modified
# Clear the data if empty
if self._frames:
self.__updateCurrentIndex()
df = self._frames.get(self._index, None)
if df is not None:
return df.update()
def repr(self):
"""
Return components for building script.
Returns:
(output, imports) The necessary script and include statements to re-create data load.
"""
imports = ['import mooseutils']
output = ['\n# Read VectorPostprocessor Data']
output += ['data = mooseutils.VectorPostprocessorReader({})'.format(repr(self._pattern))]
return output, imports
def _timeHelper(self, filename):
"""
Determine the time index. (protected)
"""
idx = filename.rfind('_') + 1
tstep = int(filename[idx:-4])
if not self._timedata:
return tstep
else:
try:
return self._timedata['time'].loc[tstep]
except Exception:
return tstep
def __updateCurrentIndex(self):
"""
Helper for setting the current key for the supplied time.
"""
if not self._frames:
index = None
# Return the latest time
elif self._time == -1:
index = self.times()[-1]
# Return the specified time
elif self._time in self._frames:
index = self._time
# Find nearest time
else:
times = self.times()
n = len(times)
idx = bisect.bisect_right(times, self._time) - 1
if idx < 0:
idx = 0
elif idx > n:
idx = -1
index = times[idx]
self._index = index
| lgpl-2.1 |
Russell-IO/ansible | test/units/modules/network/f5/test_bigip_pool_member.py | 17 | 11584 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_pool_member import ModuleParameters
from library.modules.bigip_pool_member import ApiParameters
from library.modules.bigip_pool_member import NodeApiParameters
from library.modules.bigip_pool_member import ModuleManager
from library.modules.bigip_pool_member import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_pool_member import ModuleParameters
from ansible.modules.network.f5.bigip_pool_member import ApiParameters
from ansible.modules.network.f5.bigip_pool_member import NodeApiParameters
from ansible.modules.network.f5.bigip_pool_member import ModuleManager
from ansible.modules.network.f5.bigip_pool_member import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
pool='my-pool',
address='1.2.3.4',
fqdn='fqdn.foo.bar',
name='my-name',
port=2345,
connection_limit=100,
description='this is a description',
rate_limit=70,
ratio=20,
preserve_node=False,
priority_group=10,
state='present',
partition='Common',
fqdn_auto_populate=False,
reuse_nodes=False,
# Deprecated params
# TODO(Remove in 2.7)
session_state='disabled',
monitor_state='disabled',
)
p = ModuleParameters(params=args)
assert p.name == 'my-name'
def test_api_parameters(self):
args = load_fixture('load_net_node_with_fqdn.json')
p = ApiParameters(params=args)
assert p.state == 'present'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_reuse_node_with_name(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='my-name',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
password='password',
server='localhost',
user='admin'
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_fqdn.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is True
assert results['fqdn'] == 'foo.bar.com'
assert results['state'] == 'present'
def test_create_reuse_node_with_ipv4_address(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='7.3.67.8',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
password='password',
server='localhost',
user='admin'
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_ipv4_address.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '7.3.67.8'
assert results['state'] == 'present'
def test_create_reuse_node_with_fqdn_auto_populate(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='my-name',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
fqdn_auto_populate=False,
password='password',
server='localhost',
user='admin'
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_fqdn.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is True
assert results['fqdn'] == 'foo.bar.com'
assert results['state'] == 'present'
class TestLegacyManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_name_is_hostname_with_session_and_monitor_enabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='my-name',
port=2345,
state='present',
session_state='enabled',
monitor_state='enabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['fqdn'] == 'my-name'
assert results['state'] == 'present'
def test_create_name_is_address_with_session_and_monitor_enabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='10.10.10.10',
port=2345,
state='present',
session_state='enabled',
monitor_state='enabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '10.10.10.10'
assert results['state'] == 'present'
def test_create_name_is_address_with_session_disabled_and_monitor_enabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='10.10.10.10',
port=2345,
state='present',
monitor_state='enabled',
session_state='disabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '10.10.10.10'
assert results['state'] == 'disabled'
def test_create_name_is_address_with_session_and_monitor_disabled(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
name='10.10.10.10',
port=2345,
state='present',
monitor_state='disabled',
session_state='disabled',
partition='Common',
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '10.10.10.10'
assert results['state'] == 'forced_offline'
| gpl-3.0 |
eckardm/archivematica | src/MCPClient/lib/clientScripts/generateDIPFromAIPGenerateDIP.py | 1 | 2639 | #!/usr/bin/env python2
# This file is part of Archivematica.
#
# Copyright 2010-2013 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <[email protected]>
import os
import sys
import shutil
import django
django.setup()
# dashboard
from main.models import Job, SIP
# archivematicaCommon
from custom_handlers import get_script_logger
from databaseFunctions import createSIP
if __name__ == '__main__':
logger = get_script_logger("archivematica.mcp.client.generateDIPFromAIPGenerateDIP")
# COPY THE METS FILE
# Move the DIP Directory
fauxUUID = sys.argv[1]
unitPath = sys.argv[2]
date = sys.argv[3]
basename = os.path.basename(unitPath[:-1])
uuidLen = 36
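    # The unit directory is named "<SIPName>-<originalSIPUUID>-<fauxUUID>"; each
    # trailing "-<uuid>" segment is uuidLen+1 characters, so stripping one or
    # both of them recovers the original SIP UUID and name below.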
originalSIPName = basename[:-(uuidLen+1)*2]
originalSIPUUID = basename[:-(uuidLen+1)][-uuidLen:]
METSPath = os.path.join(unitPath, "metadata/submissionDocumentation/data/", "METS.%s.xml" % (originalSIPUUID))
if not os.path.isfile(METSPath):
print >>sys.stderr, "Mets file not found: ", METSPath
exit(-1)
# move mets to DIP
src = METSPath
dst = os.path.join(unitPath, "DIP", os.path.basename(METSPath))
shutil.move(src, dst)
# Move DIP
src = os.path.join(unitPath, "DIP")
dst = os.path.join("/var/archivematica/sharedDirectory/watchedDirectories/uploadDIP/", originalSIPName + "-" + originalSIPUUID)
shutil.move(src, dst)
try:
SIP.objects.get(uuid=originalSIPUUID)
except SIP.DoesNotExist:
# otherwise doesn't appear in dashboard
createSIP(unitPath, UUID=originalSIPUUID)
Job.objects.create(jobtype="Hack to make DIP Jobs appear",
directory=unitPath,
sip_id=originalSIPUUID,
currentstep="Completed successfully",
unittype="unitSIP",
microservicegroup="Upload DIP")
| agpl-3.0 |
angyukai/boulderactive2016-landing-page | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
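# Illustrative use (mirroring _DetectVisualStudioVersions below):
# _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\14.0', 'InstallDir')
# returns the IDE install directory, or None if that version is not installed.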
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-14 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
    allow_fallback: If true, fall back to a default version when no matching
      installation is found.
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
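# Hedged usage sketch (not part of the original module): typical callers let
# auto-detection pick the newest installed version, honouring the
# GYP_MSVS_VERSION and GYP_MSVS_OVERRIDE_PATH environment variables handled
# above.
def _ExampleSelectVersion():
  return SelectVisualStudioVersion(version='auto', allow_fallback=True)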
| mit |
razor-x/scipy-data_fitting | scipy_data_fitting/model.py | 1 | 7085 | import functools
import sympy
class Model:
"""
    A model organizes symbols, expressions and replacement rules by name.
Example:
#!python
>>> model = Model()
>>> model.add_symbols('y', 'x', 'm', 'b')
        >>> y, x, m, b = model.get_symbols('y', 'x', 'm', 'b')
>>> model.expressions['line'] = y
>>> model.replacements['slope_intercept'] = (y, m * x + b)
>>> line = model.replace('line', 'slope_intercept')
m * x + b
>>> function = model.lambdify(line, ('m', 'x', 'b'))
>>> function(1, 2, 3)
5
"""
def __init__(self, name=None):
self.name = name
"""
The identifier name for this object.
"""
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def expressions(self):
"""
Dictionary to store SymPy expressions by name.
"""
if not hasattr(self, '_expressions'): self._expressions = {}
return self._expressions
@expressions.setter
def expressions(self, value):
self._expressions = value
@property
def replacements(self):
"""
Dictionary to store replacement rules by name.
Each value is a tuple of SymPy expressions: `(expression, replacement)`.
"""
if not hasattr(self, '_replacements'): self._replacements = {}
return self._replacements
@replacements.setter
def replacements(self, value):
self._replacements = value
@property
def replacement_groups(self):
"""
Dictionary to store a sequence of replacements by name.
Each value is a list of names that will be looked up
in `scipy_data_fitting.Model.replacements`.
When used to make substitutions, replacements will be applied
one at a time in the order given.
"""
if not hasattr(self, '_replacement_groups'): self._replacement_groups = {}
return self._replacement_groups
@replacement_groups.setter
def replacement_groups(self, value):
self._replacement_groups = value
@property
def symbols(self):
"""
Dictionary to store symbols by name.
Add symbols directly, or with `scipy_data_fitting.Model.add_symbol`
and `scipy_data_fitting.Model.add_symbols`.
"""
if not hasattr(self, '_symbols'): self._symbols = {}
return self._symbols
@symbols.setter
def symbols(self, value):
self._symbols = value
def symbol(self, name):
"""
Function to provide a shorthand for `self.symbols[name]`.
"""
return self.symbols[name]
def add_symbol(self, name, string=None):
"""
Add a symbol with key `name` to `scipy_data_fitting.Model.symbols`.
Optionally, specify an alternative `string` to pass to [`sympy.Symbol`][1],
otherwise `name` is used.
[1]: http://docs.sympy.org/dev/modules/core.html#id4
"""
if not string: string = name
self.symbols[name] = sympy.Symbol(string)
def add_symbols(self, *names):
"""
Pass any number of strings to add symbols to `scipy_data_fitting.Model.symbols`
using `scipy_data_fitting.Model.add_symbol`.
Example:
#!python
>>> model.add_symbols('x', 'y', 'z')
"""
for name in names:
self.add_symbol(name)
def get_symbols(self, *symbols):
"""
A tuple of symbols by name.
Example:
#!python
>>> x, y, z = model.get_symbols('x', 'y', 'z')
"""
return ( self.symbol(s) for s in symbols )
def replace(self, expression, replacements):
"""
All purpose method to reduce an expression by applying
successive replacement rules.
`expression` is either a SymPy expression
or a key in `scipy_data_fitting.Model.expressions`.
`replacements` can be any of the following,
or a list of any combination of the following:
- A replacement tuple as in `scipy_data_fitting.Model.replacements`.
- The name of a replacement in `scipy_data_fitting.Model.replacements`.
- The name of a replacement group in `scipy_data_fitting.Model.replacement_groups`.
Examples:
#!python
>>> model.replace(x + y, (x, z))
z + y
>>> model.replace('expression', (x, z))
>>> model.replace('expression', 'replacement')
>>> model.replace('expression', ['replacement_1', 'replacement_2'])
>>> model.replace('expression', ['replacement', 'group'])
"""
# When expression is a string,
# get the expressions from self.expressions.
if isinstance(expression, str):
expression = self.expressions[expression]
# Allow for replacements to be empty.
if not replacements:
return expression
# Allow replacements to be a string.
if isinstance(replacements, str):
if replacements in self.replacements:
return self.replace(expression, self.replacements[replacements])
elif replacements in self.replacement_groups:
return self.replace(expression, self.replacement_groups[replacements])
# When replacements is a list of strings or tuples,
# Use reduce to make all the replacements.
if all(isinstance(item, str) for item in replacements) \
or all(isinstance(item, tuple) for item in replacements):
return functools.reduce(self.replace, replacements, expression)
# Otherwise make the replacement.
return expression.replace(*replacements)
def lambdify(self, expression, symbols, **kwargs):
"""
Converts a SymPy expression into a function using [`sympy.lambdify`][1].
`expression` can be a SymPy expression or the name of an expression
in `scipy_data_fitting.Model.expressions`.
`symbols` can be any of the following,
or a list of any combination of the following:
- A SymPy symbol.
- The name of a symbol in `scipy_data_fitting.Model.symbols`.
Additional keyword arguments are passed to [`sympy.lambdify`][1].
[1]: http://docs.sympy.org/latest/modules/utilities/lambdify.html#sympy.utilities.lambdify.lambdify
"""
if isinstance(expression, str):
expression = self.expressions[expression]
if hasattr(symbols, '__iter__'):
variables = []
for s in symbols:
if isinstance(s, str):
variables.append(self.symbol(s))
else:
variables.append(s)
else:
if isinstance(symbols, str):
variables = (self.symbol(symbols), )
else:
variables = (symbols, )
return sympy.lambdify(tuple(variables), expression, **kwargs)
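# Hedged usage sketch (not part of the original class): shows how a replacement
# group applies its rules in the order given. All symbol and rule names below
# are illustrative assumptions.
def _example_replacement_group():
    model = Model()
    model.add_symbols('y', 'x', 'm', 'b', 'v', 't')
    y, x, m, b, v, t = model.get_symbols('y', 'x', 'm', 'b', 'v', 't')
    model.expressions['line'] = y
    model.replacements['slope_intercept'] = (y, m * x + b)
    model.replacements['constant_velocity'] = (x, v * t)
    model.replacement_groups['kinematics'] = ['slope_intercept', 'constant_velocity']
    # slope_intercept is applied first, then constant_velocity: b + m*v*t
    return model.replace('line', 'kinematics')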
| mit |
simonkuang/grpc | tools/run_tests/performance/massage_qps_stats.py | 25 | 28679 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
def massage_qps_stats(scenario_result):
for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
if "coreStats" in stats:
# Get rid of the "coreStats" element and replace it by statistics
# that correspond to columns in the bigquery schema.
core_stats = stats["coreStats"]
del stats["coreStats"]
stats[
"core_client_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_calls_created")
stats[
"core_server_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_calls_created")
stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
core_stats, "cqs_created")
stats[
"core_client_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_channels_created")
stats[
"core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_subchannels_created")
stats[
"core_server_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_channels_created")
stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_poll")
stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_wait")
stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick")
stats[
"core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_without_poller")
stats[
"core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_again")
stats[
"core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_fd")
stats[
"core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_cv")
stats[
"core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_own_thread")
stats["core_syscall_epoll_ctl"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_epoll_ctl")
stats[
"core_pollset_fd_cache_hits"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_fd_cache_hits")
stats[
"core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
core_stats, "histogram_slow_lookups")
stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_write")
stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_read")
stats[
"core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_pollers_created")
stats[
"core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_poller_polls")
stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_batches")
stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_cancel")
stats[
"core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_initial_metadata")
stats[
"core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_message")
stats[
"core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_trailing_metadata")
stats[
"core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_initial_metadata")
stats[
"core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_message")
stats[
"core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_trailing_metadata")
stats[
"core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_settings_writes")
stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_pings_sent")
stats[
"core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_begun")
stats[
"core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_offloaded")
stats[
"core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_continued")
stats[
"core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_partial_writes")
stats[
"core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_initial_write")
stats[
"core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_start_new_stream")
stats[
"core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_message")
stats[
"core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_initial_metadata")
stats[
"core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_trailing_metadata")
stats[
"core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_retry_send_ping")
stats[
"core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_continue_pings")
stats[
"core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_goaway_sent")
stats[
"core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_rst_stream")
stats[
"core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_close_from_api")
stats[
"core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_stream_flow_control")
stats[
"core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control")
stats[
"core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_settings")
stats[
"core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_bdp_estimator_ping")
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_setting"
)
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_update"
)
stats[
"core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_application_ping")
stats[
"core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_keepalive_ping")
stats[
"core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control_unstalled"
)
stats[
"core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_ping_response")
stats[
"core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_force_rst_stream")
stats[
"core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_spurious_writes_begun")
stats[
"core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_indexed")
stats[
"core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx")
stats[
"core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx_v")
stats[
"core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx")
stats[
"core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx_v")
stats[
"core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx")
stats[
"core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx_v")
stats[
"core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_uncompressed")
stats[
"core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_huffman")
stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary")
stats[
"core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary_base64")
stats[
"core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_indexed")
stats[
"core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx")
stats[
"core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx_v")
stats[
"core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx")
stats[
"core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx_v")
stats[
"core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx")
stats[
"core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx_v")
stats[
"core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_uncompressed")
stats[
"core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_huffman")
stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary")
stats[
"core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary_base64")
stats[
"core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_initiated")
stats[
"core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_items")
stats[
"core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_final_items")
stats[
"core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_offloaded")
stats[
"core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_initiated")
stats[
"core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_scheduled_items")
stats[
"core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_set_notify_on_cancel")
stats[
"core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_cancelled")
stats[
"core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_short_items")
stats[
"core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_long_items")
stats[
"core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_to_self")
stats[
"core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "executor_wakeup_initiated")
stats[
"core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
core_stats, "executor_queue_drained")
stats[
"core_executor_push_retries"] = massage_qps_stats_helpers.counter(
core_stats, "executor_push_retries")
stats[
"core_server_requested_calls"] = massage_qps_stats_helpers.counter(
core_stats, "server_requested_calls")
stats[
"core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
core_stats, "server_slowpath_requests_queued")
stats[
"core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_failures")
stats[
"core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_successes")
stats[
"core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_transient_pop_failures")
h = massage_qps_stats_helpers.histogram(core_stats,
"call_initial_size")
stats["core_call_initial_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_call_initial_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"poll_events_returned")
stats["core_poll_events_returned"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_poll_events_returned_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_size")
stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_write_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_iov_size")
stats["core_tcp_write_iov_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_write_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer")
stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_offer_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer_iov_size")
stats["core_tcp_read_offer_iov_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"http2_send_message_size")
stats["core_http2_send_message_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_initial_metadata_per_write")
stats["core_http2_send_initial_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_message_per_write")
stats["core_http2_send_message_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_trailing_metadata_per_write")
stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats[
"core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_flowctl_per_write")
stats["core_http2_send_flowctl_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"server_cqs_checked")
stats["core_server_cqs_checked"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_server_cqs_checked_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
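# Hedged illustration (not part of the generated code): massage_qps_stats
# mutates each stats dict in place, replacing the nested "coreStats" block with
# flat scalar columns (e.g. stats["core_syscall_write"]) and histogram
# summaries (e.g. stats["core_tcp_write_size_95p"]) that correspond to columns
# in the BigQuery schema.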
| apache-2.0 |
adrienbrault/home-assistant | tests/components/netatmo/test_media_source.py | 13 | 2552 | """Test Local Media Source."""
import ast
import pytest
from homeassistant.components import media_source
from homeassistant.components.media_source import const
from homeassistant.components.media_source.models import PlayMedia
from homeassistant.components.netatmo import DATA_CAMERAS, DATA_EVENTS, DOMAIN
from homeassistant.setup import async_setup_component
from tests.common import load_fixture
async def test_async_browse_media(hass):
"""Test browse media."""
assert await async_setup_component(hass, DOMAIN, {})
# Prepare cached Netatmo event date
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_EVENTS] = ast.literal_eval(
load_fixture("netatmo/events.txt")
)
hass.data[DOMAIN][DATA_CAMERAS] = {
"12:34:56:78:90:ab": "MyCamera",
"12:34:56:78:90:ac": "MyOutdoorCamera",
}
assert await async_setup_component(hass, const.DOMAIN, {})
await hass.async_block_till_done()
# Test camera not exists
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/98:76:54:32:10:ff"
)
assert str(excinfo.value) == "Camera does not exist."
# Test browse event
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab/12345"
)
assert str(excinfo.value) == "Event does not exist."
# Test invalid base
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/invalid/base"
)
assert str(excinfo.value) == "Unknown source directory."
# Test successful listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/"
)
# Test successful events listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab"
)
# Test successful event listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab/1599152672"
)
assert media
# Test successful event resolve
media = await media_source.async_resolve_media(
hass, f"{const.URI_SCHEME}{DOMAIN}/events/12:34:56:78:90:ab/1599152672"
)
assert media == PlayMedia(
url="http:///files/high/index.m3u8", mime_type="application/x-mpegURL"
)
| mit |
Zhongqilong/mykbengineer | kbe/src/lib/python/PCbuild/build_ssl.py | 30 | 9208 | # Script for building the _ssl and _hashlib modules for Windows.
# Uses Perl to setup the OpenSSL environment correctly
# and build OpenSSL, then invokes a simple nmake session
# for the actual _ssl.pyd and _hashlib.pyd DLLs.
# THEORETICALLY, you can:
# * Unpack the latest SSL release one level above your main Python source
# directory. It is likely you will already find the zlib library and
# any other external packages there.
# * Install ActivePerl and ensure it is somewhere on your path.
# * Run this script from the PCBuild directory.
#
# it should configure and build SSL, then build the _ssl and _hashlib
# Python extensions without intervention.
# Modified by Christian Heimes
# Now this script supports pre-generated makefiles and assembly files.
# Developers don't need an installation of Perl anymore to build Python. An svn
# checkout from our svn repository is enough.
#
# In Order to create the files in the case of an update you still need Perl.
# Run build_ssl in this order:
# python.exe build_ssl.py Release x64
# python.exe build_ssl.py Release Win32
import os, sys, re, shutil
import subprocess
# Find all "foo.exe" files on the PATH.
def find_all_on_path(filename, extras = None):
entries = os.environ["PATH"].split(os.pathsep)
ret = []
for p in entries:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
if extras:
for p in extras:
fname = os.path.abspath(os.path.join(p, filename))
if os.path.isfile(fname) and fname not in ret:
ret.append(fname)
return ret
# Find a suitable Perl installation for OpenSSL.
# cygwin perl does *not* work. ActivePerl does.
# Being a Perl dummy, the simplest way I can check is if the "Win32" package
# is available.
def find_working_perl(perls):
for perl in perls:
try:
subprocess.check_output([perl, "-e", "use Win32;"])
except subprocess.CalledProcessError:
continue
else:
return perl
if perls:
print("The following perl interpreters were found:")
for p in perls:
print(" ", p)
print(" None of these versions appear suitable for building OpenSSL")
else:
print("NO perl interpreters were found on this machine at all!")
print(" Please install ActivePerl and ensure it appears on your path")
# Fetch SSL directory from VC properties
def get_ssl_dir():
propfile = (os.path.join(os.path.dirname(__file__), 'pyproject.props'))
with open(propfile, encoding='utf-8-sig') as f:
m = re.search('openssl-([^<]+)<', f.read())
return "..\..\openssl-"+m.group(1)
def create_makefile64(makefile, m32):
"""Create and fix makefile for 64bit
Replace 32 with 64bit directories
"""
if not os.path.isfile(m32):
return
with open(m32) as fin:
with open(makefile, 'w') as fout:
for line in fin:
line = line.replace("=tmp32", "=tmp64")
line = line.replace("=out32", "=out64")
line = line.replace("=inc32", "=inc64")
# force 64 bit machine
line = line.replace("MKLIB=lib", "MKLIB=lib /MACHINE:X64")
line = line.replace("LFLAGS=", "LFLAGS=/MACHINE:X64 ")
# don't link against the lib on 64bit systems
line = line.replace("bufferoverflowu.lib", "")
fout.write(line)
os.unlink(m32)
def fix_makefile(makefile):
"""Fix some stuff in all makefiles
"""
if not os.path.isfile(makefile):
return
with open(makefile) as fin:
lines = fin.readlines()
with open(makefile, 'w') as fout:
for line in lines:
if line.startswith("PERL="):
continue
if line.startswith("CP="):
line = "CP=copy\n"
if line.startswith("MKDIR="):
line = "MKDIR=mkdir\n"
if line.startswith("CFLAG="):
line = line.strip()
for algo in ("RC5", "MDC2", "IDEA"):
noalgo = " -DOPENSSL_NO_%s" % algo
if noalgo not in line:
line = line + noalgo
line = line + '\n'
fout.write(line)
def run_configure(configure, do_script):
print("perl Configure "+configure+" no-idea no-mdc2")
os.system("perl Configure "+configure+" no-idea no-mdc2")
print(do_script)
os.system(do_script)
def cmp(f1, f2):
bufsize = 1024 * 8
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
def copy(src, dst):
if os.path.isfile(dst) and cmp(src, dst):
return
shutil.copy(src, dst)
def main():
build_all = "-a" in sys.argv
if sys.argv[1] == "Release":
debug = False
elif sys.argv[1] == "Debug":
debug = True
else:
raise ValueError(str(sys.argv))
if sys.argv[2] == "Win32":
arch = "x86"
configure = "VC-WIN32"
do_script = "ms\\do_nasm"
makefile="ms\\nt.mak"
m32 = makefile
dirsuffix = "32"
elif sys.argv[2] == "x64":
arch="amd64"
configure = "VC-WIN64A"
do_script = "ms\\do_win64a"
makefile = "ms\\nt64.mak"
m32 = makefile.replace('64', '')
dirsuffix = "64"
#os.environ["VSEXTCOMP_USECL"] = "MS_OPTERON"
else:
raise ValueError(str(sys.argv))
make_flags = ""
if build_all:
make_flags = "-a"
# perl should be on the path, but we also look in "\perl" and "c:\\perl"
# as "well known" locations
perls = find_all_on_path("perl.exe", ["\\perl\\bin", "C:\\perl\\bin"])
perl = find_working_perl(perls)
if perl:
print("Found a working perl at '%s'" % (perl,))
else:
print("No Perl installation was found. Existing Makefiles are used.")
sys.stdout.flush()
# Look for SSL 2 levels up from pcbuild - ie, same place zlib etc all live.
ssl_dir = get_ssl_dir()
if ssl_dir is None:
sys.exit(1)
old_cd = os.getcwd()
try:
os.chdir(ssl_dir)
        # rebuild makefile when we roll over from the 32 to the 64 bit build
if arch == "amd64" and os.path.isfile(m32) and not os.path.isfile(makefile):
os.unlink(m32)
# If the ssl makefiles do not exist, we invoke Perl to generate them.
# Due to a bug in this script, the makefile sometimes ended up empty
# Force a regeneration if it is.
if not os.path.isfile(makefile) or os.path.getsize(makefile)==0:
if perl is None:
print("Perl is required to build the makefiles!")
sys.exit(1)
print("Creating the makefiles...")
sys.stdout.flush()
# Put our working Perl at the front of our path
os.environ["PATH"] = os.path.dirname(perl) + \
os.pathsep + \
os.environ["PATH"]
run_configure(configure, do_script)
if debug:
print("OpenSSL debug builds aren't supported.")
#if arch=="x86" and debug:
# # the do_masm script in openssl doesn't generate a debug
# # build makefile so we generate it here:
# os.system("perl util\mk1mf.pl debug "+configure+" >"+makefile)
if arch == "amd64":
create_makefile64(makefile, m32)
fix_makefile(makefile)
copy(r"crypto\buildinf.h", r"crypto\buildinf_%s.h" % arch)
copy(r"crypto\opensslconf.h", r"crypto\opensslconf_%s.h" % arch)
# If the assembler files don't exist in tmpXX, copy them there
if perl is None and os.path.exists("asm"+dirsuffix):
if not os.path.exists("tmp"+dirsuffix):
os.mkdir("tmp"+dirsuffix)
for f in os.listdir("asm"+dirsuffix):
if not f.endswith(".asm"): continue
if os.path.isfile(r"tmp%s\%s" % (dirsuffix, f)): continue
shutil.copy(r"asm%s\%s" % (dirsuffix, f), "tmp"+dirsuffix)
# Now run make.
if arch == "amd64":
rc = os.system("nasm -f win64 -DNEAR -Ox -g ms\\uptable.asm")
if rc:
print("nasm assembler has failed.")
sys.exit(rc)
copy(r"crypto\buildinf_%s.h" % arch, r"crypto\buildinf.h")
copy(r"crypto\opensslconf_%s.h" % arch, r"crypto\opensslconf.h")
#makeCommand = "nmake /nologo PERL=\"%s\" -f \"%s\"" %(perl, makefile)
makeCommand = "nmake /nologo -f \"%s\"" % makefile
print("Executing ssl makefiles:", makeCommand)
sys.stdout.flush()
rc = os.system(makeCommand)
if rc:
print("Executing "+makefile+" failed")
print(rc)
sys.exit(rc)
finally:
os.chdir(old_cd)
sys.exit(rc)
if __name__=='__main__':
main()
| lgpl-3.0 |
hehongliang/tensorflow | tensorflow/python/distribute/shared_variable_creator.py | 45 | 3938 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to re-use variables created on first device on subsequent devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
_VARIABLE_UNIQUIFYING_REGEX = re.compile(r"_\d/")
_VARIABLE_UNIQUIFYING_REGEX_AT_END = re.compile(r"_\d$")
def _canonicalize_variable_name(name):
# If no name is specified, uses default name "Variable".
if name is None:
return "Variable"
# Replace all instances of "_<num>/" with "/"
name = _VARIABLE_UNIQUIFYING_REGEX.sub("/", name)
# Replace any instances of "_<num>" at the end of the string with ""
name = _VARIABLE_UNIQUIFYING_REGEX_AT_END.sub("", name)
return name
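# Hedged illustration (not part of the original module): the two regexes above
# undo TensorFlow's name uniquification, for example
#   _canonicalize_variable_name("dense_1/kernel")  -> "dense/kernel"
#   _canonicalize_variable_name("dense/kernel_2")  -> "dense/kernel"
#   _canonicalize_variable_name(None)              -> "Variable"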
def make_fn(shared_variable_store, device_id):
"""Construct the variable creator function for device `device_id`.
Constructs custom variable creator functions for the given device.
On first device (device_id == 0), it creates the variable using the
`next_creator`, and stores it in the provided `shared_variable_store`.
On all other devices (device_id > 0), it tries to re-use the variable
already created with the same name. If no such variable exists, it throws an
error.
Additionally, we de-uniquify variable names before checking for matches. This
helps re-use variables which are intended to be the same but have different
names due to variable uniquification happening upstream. Since this might
mean we may have multiple variables with the same canonical name, we store
them in a list per canonical name and return them in the same order as well.
Args:
shared_variable_store: A dictionary that we will use to store variables
created on the first device, and re-used by creators for other devices.
device_id: Integer index of the device whose creator should be
constructed.
Returns:
An appropriate creator function based on device_id.
"""
variable_scope_access_index = {}
assert isinstance(device_id, int)
def create_new_variable(next_creator, *args, **kwargs):
"""Create the variable using `next_creator` and store it."""
canonical_name = _canonicalize_variable_name(kwargs.get("name"))
v = next_creator(*args, **kwargs)
if canonical_name not in shared_variable_store:
shared_variable_store[canonical_name] = []
shared_variable_store[canonical_name].append(v)
return v
def reuse_variable(next_creator, *args, **kwargs):
"""Re-use existing variable from store with same name (in order)."""
del next_creator, args
name = kwargs.get("name")
canonical_name = _canonicalize_variable_name(name)
try:
variable_index = variable_scope_access_index.get(canonical_name, 0)
v = shared_variable_store[canonical_name][variable_index]
# TODO(priyag): Make this variable re-use more robust by adding checks
# that the requested shape and dtype match the existing variable.
variable_scope_access_index[canonical_name] = variable_index + 1
return v
except (KeyError, IndexError):
raise RuntimeError(
"Tried to create variable {} with mismatching name on device {}".
format(name, device_id))
if device_id == 0:
return create_new_variable
else:
return reuse_variable
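# Hedged usage sketch (not part of the original module): creators returned by
# make_fn are normally installed per replica via tf.variable_creator_scope;
# this only shows constructing them against a shared store.
def _example_make_creators():
  shared_store = {}
  creator_0 = make_fn(shared_store, 0)  # creates variables and records them
  creator_1 = make_fn(shared_store, 1)  # re-uses variables by canonical name
  return creator_0, creator_1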
| apache-2.0 |
pleaseproject/python-for-android | python-modules/twisted/twisted/conch/test/test_openssh_compat.py | 60 | 3381 | # Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.openssh_compat}.
"""
import os
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.python.compat import set
try:
import Crypto.Cipher.DES3
import pyasn1
except ImportError:
OpenSSHFactory = None
else:
from twisted.conch.openssh_compat.factory import OpenSSHFactory
from twisted.conch.test import keydata
from twisted.test.test_process import MockOS
class OpenSSHFactoryTests(TestCase):
"""
Tests for L{OpenSSHFactory}.
"""
if getattr(os, "geteuid", None) is None:
skip = "geteuid/seteuid not available"
elif OpenSSHFactory is None:
skip = "Cannot run without PyCrypto or PyASN1"
def setUp(self):
self.factory = OpenSSHFactory()
self.keysDir = FilePath(self.mktemp())
self.keysDir.makedirs()
self.factory.dataRoot = self.keysDir.path
self.keysDir.child("ssh_host_foo").setContent("foo")
self.keysDir.child("bar_key").setContent("foo")
self.keysDir.child("ssh_host_one_key").setContent(
keydata.privateRSA_openssh)
self.keysDir.child("ssh_host_two_key").setContent(
keydata.privateDSA_openssh)
self.keysDir.child("ssh_host_three_key").setContent(
"not a key content")
self.keysDir.child("ssh_host_one_key.pub").setContent(
keydata.publicRSA_openssh)
self.mockos = MockOS()
self.patch(os, "seteuid", self.mockos.seteuid)
self.patch(os, "setegid", self.mockos.setegid)
def test_getPublicKeys(self):
"""
L{OpenSSHFactory.getPublicKeys} should return the available public keys
in the data directory
"""
keys = self.factory.getPublicKeys()
self.assertEquals(len(keys), 1)
keyTypes = keys.keys()
self.assertEqual(keyTypes, ['ssh-rsa'])
def test_getPrivateKeys(self):
"""
L{OpenSSHFactory.getPrivateKeys} should return the available private
keys in the data directory.
"""
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [])
self.assertEquals(self.mockos.setegidCalls, [])
def test_getPrivateKeysAsRoot(self):
"""
L{OpenSSHFactory.getPrivateKeys} should switch to root if the keys
aren't readable by the current user.
"""
keyFile = self.keysDir.child("ssh_host_two_key")
# Fake permission error by changing the mode
keyFile.chmod(0000)
self.addCleanup(keyFile.chmod, 0777)
# And restore the right mode when seteuid is called
savedSeteuid = os.seteuid
def seteuid(euid):
keyFile.chmod(0777)
return savedSeteuid(euid)
self.patch(os, "seteuid", seteuid)
keys = self.factory.getPrivateKeys()
self.assertEquals(len(keys), 2)
keyTypes = keys.keys()
self.assertEqual(set(keyTypes), set(['ssh-rsa', 'ssh-dss']))
self.assertEquals(self.mockos.seteuidCalls, [0, os.geteuid()])
self.assertEquals(self.mockos.setegidCalls, [0, os.getegid()])
| apache-2.0 |
photoninger/ansible | lib/ansible/modules/monitoring/circonus_annotation.py | 89 | 7162 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014-2015, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: circonus_annotation
short_description: create an annotation in circonus
description:
    - Create an annotation event with a given category, title and description. Optionally a start, stop or duration can be provided.
author: "Nick Harring (@NickatEpic)"
version_added: 2.0
requirements:
- requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
notes:
- Check mode isn't supported.
options:
api_key:
description:
- Circonus API key
required: true
category:
description:
- Annotation Category
required: true
description:
description:
- Description of annotation
required: true
title:
description:
- Title of annotation
required: true
start:
description:
- Unix timestamp of event start
default: I(now)
stop:
description:
- Unix timestamp of event end
default: I(now) + I(duration)
duration:
description:
- Duration in seconds of annotation
default: 0
'''
EXAMPLES = '''
# Create a simple annotation event with a source, defaults to start and end time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
# Create an annotation with a duration of 5 minutes and a default start time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
duration: 300
# Create an annotation with an explicit start and stop time
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
    start: 1395940006
    stop: 1395954407
'''
RETURN = '''
annotation:
description: details about the created annotation
returned: success
type: complex
contains:
_cid:
description: annotation identifier
returned: success
type: string
sample: /annotation/100000
_created:
description: creation timestamp
returned: success
type: int
sample: 1502236928
_last_modified:
description: last modification timestamp
returned: success
type: int
sample: 1502236928
_last_modified_by:
description: last modified by
returned: success
type: string
sample: /user/1000
category:
description: category of the created annotation
returned: success
type: string
sample: alerts
title:
description: title of the created annotation
returned: success
type: string
sample: WARNING
description:
description: description of the created annotation
returned: success
type: string
sample: Host is down.
        start:
            description: timestamp when the annotation starts to apply
            returned: success
            type: int
            sample: 1502236928
        stop:
            description: timestamp when the annotation ends
            returned: success
            type: int
            sample: 1502236928
rel_metrics:
description: Array of metrics related to this annotation, each metrics is a string.
returned: success
type: list
sample:
- 54321_kbps
'''
import json
import time
import traceback
from distutils.version import LooseVersion
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_native
def check_requests_dep(module):
"""Check if an adequate requests version is available"""
if not HAS_REQUESTS:
module.fail_json(msg='requests is required for this module')
else:
required_version = '2.0.0' if PY3 else '1.0.0'
if LooseVersion(requests.__version__) < LooseVersion(required_version):
module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (required_version, requests.__version__))
def post_annotation(annotation, api_key):
''' Takes annotation dict and api_key string'''
base_url = 'https://api.circonus.com/v2'
    annotate_post_endpoint = '/annotation'
    resp = requests.post(base_url + annotate_post_endpoint,
headers=build_headers(api_key), data=json.dumps(annotation))
resp.raise_for_status()
return resp
def create_annotation(module):
''' Takes ansible module object '''
annotation = {}
duration = module.params['duration']
if module.params['start'] is not None:
start = module.params['start']
else:
start = int(time.time())
if module.params['stop'] is not None:
stop = module.params['stop']
else:
stop = int(time.time()) + duration
annotation['start'] = start
annotation['stop'] = stop
annotation['category'] = module.params['category']
annotation['description'] = module.params['description']
annotation['title'] = module.params['title']
return annotation
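# Hedged illustration (not part of the original module): with only a duration
# given, create_annotation() yields a payload roughly like
# {'start': <now>, 'stop': <now> + duration, 'category': ..., 'title': ...,
#  'description': ...}, which post_annotation() then submits as JSON.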
def build_headers(api_token):
'''Takes api token, returns headers with it included.'''
headers = {'X-Circonus-App-Name': 'ansible',
'Host': 'api.circonus.com', 'X-Circonus-Auth-Token': api_token,
'Accept': 'application/json'}
return headers
def main():
'''Main function, dispatches logic'''
module = AnsibleModule(
argument_spec=dict(
start=dict(type='int'),
stop=dict(type='int'),
category=dict(required=True),
title=dict(required=True),
description=dict(required=True),
duration=dict(default=0, type='int'),
api_key=dict(required=True, no_log=True)
)
)
check_requests_dep(module)
annotation = create_annotation(module)
try:
resp = post_annotation(annotation, module.params['api_key'])
except requests.exceptions.RequestException as e:
module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
module.exit_json(changed=True, annotation=resp.json())
if __name__ == '__main__':
main()
| gpl-3.0 |
jlowin/airflow | scripts/perf/scheduler_ops_metrics.py | 30 | 6536 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import pandas as pd
import sys
from airflow import configuration, settings
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
    1. Queuing delay - time from starting the executor until the task
       instance is added to the executor queue.
    2. Start delay - time from starting the executor until the task instance
       starts execution.
    3. Land time - time from starting the executor until the task instance
       completes.
    4. Duration - time taken to execute the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = filter(lambda x: x.state == State.SUCCESS, tis)
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
# the tasks in perf_dag_1 and per_dag_2 have a daily schedule interval.
num_task_instances = sum([(datetime.today() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(datetime.now()-self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if (len(successful_tis) == num_task_instances):
self.logger.info("All tasks processed! Printing stats.")
else:
self.logger.info("Test timeout reached. "
"Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
def main():
configuration.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
| apache-2.0 |
eminence/Minecraft-Overviewer | overviewer_core/cache.py | 6 | 5470 | # This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
"""This module has supporting functions for the caching logic used in world.py.
Each cache class should implement the standard container type interface
(__getitem__ and __setitem__), as well as provide "hits" and "misses"
attributes.
"""
import functools
import logging
import cPickle
class LRUCache(object):
"""A simple, generic, in-memory LRU cache that implements the standard
python container interface.
An ordered dict type would simplify this implementation a bit, but we want
Python 2.6 compatibility and the standard library ordereddict was added in
2.7. It's probably okay because this implementation can be tuned for
exactly what we need and nothing more.
This implementation keeps a linked-list of cache keys and values, ordered
in least-recently-used order. A dictionary maps keys to linked-list nodes.
    On cache hit, the link is moved to the end of the list. When a new item is
    inserted into a full cache, the first (least-recently-used) item of the
    list is evicted. All operations have constant time complexity (dict
    lookups are worst case O(n) time). A brief usage sketch appears at the end
    of this module.
"""
class _LinkNode(object):
__slots__ = ['left', 'right', 'key', 'value']
        def __init__(self, l=None, r=None, k=None, v=None):
self.left = l
self.right = r
self.key = k
self.value = v
def __init__(self, size=100, destructor=None):
"""Initialize a new LRU cache with the given size.
destructor, if given, is a callable that is called upon an item being
evicted from the cache. It takes one argument, the value stored in the
cache.
"""
self.cache = {}
# Two sentinel nodes at the ends of the linked list simplify boundary
# conditions in the code below.
self.listhead = LRUCache._LinkNode()
self.listtail = LRUCache._LinkNode()
self.listhead.right = self.listtail
self.listtail.left = self.listhead
self.hits = 0
self.misses = 0
self.size = size
self.destructor = destructor
# Initialize an empty cache of the same size for worker processes
def __getstate__(self):
return self.size
def __setstate__(self, size):
self.__init__(size)
def __getitem__(self, key):
try:
link = self.cache[key]
except KeyError:
self.misses += 1
raise
# Disconnect the link from where it is
link.left.right = link.right
link.right.left = link.left
# Insert the link at the end of the list
tail = self.listtail
link.left = tail.left
link.right = tail
tail.left.right = link
tail.left = link
self.hits += 1
return link.value
def __setitem__(self, key, value):
cache = self.cache
if key in cache:
# Shortcut this case
cache[key].value = value
return
if len(cache) >= self.size:
# Evict a node
link = self.listhead.right
del cache[link.key]
link.left.right = link.right
link.right.left = link.left
d = self.destructor
if d:
d(link.value)
del link
# The node doesn't exist already, and we have room for it. Let's do this.
tail = self.listtail
        link = LRUCache._LinkNode(tail.left, tail, key, value)
tail.left.right = link
tail.left = link
cache[key] = link
def __delitem__(self, key):
# Used to flush the cache of this key
cache = self.cache
link = cache[key]
del cache[key]
link.left.right = link.right
link.right.left = link.left
# Call the destructor
d = self.destructor
if d:
d(link.value)
# memcached is an option, but unless your IO costs are really high, it just
# ends up adding overhead and isn't worth it.
try:
import memcache
except ImportError:
class Memcached(object):
def __init__(*args):
raise ImportError("No module 'memcache' found. Please install python-memcached")
else:
class Memcached(object):
def __init__(self, conn='127.0.0.1:11211'):
self.conn = conn
self.mc = memcache.Client([conn], debug=0, pickler=cPickle.Pickler, unpickler=cPickle.Unpickler)
def __getstate__(self):
return self.conn
def __setstate__(self, conn):
self.__init__(conn)
def __getitem__(self, key):
v = self.mc.get(key)
if not v:
raise KeyError()
return v
def __setitem__(self, key, value):
self.mc.set(key, value)
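# Minimal usage sketch of LRUCache (illustrative only; assumes a Python 2
# interpreter, since this module imports cPickle). Keys and sizes below are
# arbitrary example values.
if __name__ == "__main__":
    _demo = LRUCache(size=2)
    _demo["a"] = 1
    _demo["b"] = 2
    assert _demo["a"] == 1  # hit; "a" becomes the most recently used entry
    _demo["c"] = 3          # cache is full, so "b" (least recently used) is evicted
    try:
        _demo["b"]          # miss; the key was evicted above
    except KeyError:
        pass
    print("LRUCache demo: hits=%d misses=%d" % (_demo.hits, _demo.misses))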
| gpl-3.0 |
vladimir-ipatov/ganeti | lib/workerpool.py | 7 | 18694 | #
#
# Copyright (C) 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Base classes for worker pools.
"""
import logging
import threading
import heapq
import itertools
from ganeti import compat
from ganeti import errors
_TERMINATE = object()
_DEFAULT_PRIORITY = 0
class DeferTask(Exception):
"""Special exception class to defer a task.
This class can be raised by L{BaseWorker.RunTask} to defer the execution of a
task. Optionally, the priority of the task can be changed.
"""
def __init__(self, priority=None):
"""Initializes this class.
@type priority: number
@param priority: New task priority (None means no change)
"""
Exception.__init__(self)
self.priority = priority
class NoSuchTask(Exception):
"""Exception raised when a task can't be found.
"""
class BaseWorker(threading.Thread, object):
"""Base worker class for worker pools.
Users of a worker pool must override RunTask in a subclass.
"""
# pylint: disable=W0212
def __init__(self, pool, worker_id):
"""Constructor for BaseWorker thread.
@param pool: the parent worker pool
@param worker_id: identifier for this worker
"""
super(BaseWorker, self).__init__(name=worker_id)
self.pool = pool
self._worker_id = worker_id
self._current_task = None
assert self.getName() == worker_id
def ShouldTerminate(self):
"""Returns whether this worker should terminate.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
return self.pool._ShouldWorkerTerminateUnlocked(self)
finally:
self.pool._lock.release()
def GetCurrentPriority(self):
"""Returns the priority of the current task.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
(priority, _, _, _) = self._current_task
return priority
finally:
self.pool._lock.release()
def SetTaskName(self, taskname):
"""Sets the name of the current task.
Should only be called from within L{RunTask}.
@type taskname: string
@param taskname: Task's name
"""
if taskname:
name = "%s/%s" % (self._worker_id, taskname)
else:
name = self._worker_id
# Set thread name
self.setName(name)
def _HasRunningTaskUnlocked(self):
"""Returns whether this worker is currently running a task.
"""
return (self._current_task is not None)
def _GetCurrentOrderAndTaskId(self):
"""Returns the order and task ID of the current task.
Should only be called from within L{RunTask}.
"""
self.pool._lock.acquire()
try:
assert self._HasRunningTaskUnlocked()
(_, order_id, task_id, _) = self._current_task
return (order_id, task_id)
finally:
self.pool._lock.release()
def run(self):
"""Main thread function.
Waits for new tasks to show up in the queue.
"""
pool = self.pool
while True:
assert self._current_task is None
defer = None
try:
# Wait on lock to be told either to terminate or to do a task
pool._lock.acquire()
try:
task = pool._WaitForTaskUnlocked(self)
if task is _TERMINATE:
# Told to terminate
break
if task is None:
# Spurious notification, ignore
continue
self._current_task = task
# No longer needed, dispose of reference
del task
assert self._HasRunningTaskUnlocked()
finally:
pool._lock.release()
(priority, _, _, args) = self._current_task
try:
# Run the actual task
assert defer is None
logging.debug("Starting task %r, priority %s", args, priority)
assert self.getName() == self._worker_id
try:
self.RunTask(*args) # pylint: disable=W0142
finally:
self.SetTaskName(None)
logging.debug("Done with task %r, priority %s", args, priority)
except DeferTask, err:
defer = err
if defer.priority is None:
# Use same priority
defer.priority = priority
logging.debug("Deferring task %r, new priority %s",
args, defer.priority)
assert self._HasRunningTaskUnlocked()
except: # pylint: disable=W0702
logging.exception("Caught unhandled exception")
assert self._HasRunningTaskUnlocked()
finally:
# Notify pool
pool._lock.acquire()
try:
if defer:
assert self._current_task
# Schedule again for later run
(_, _, task_id, args) = self._current_task
pool._AddTaskUnlocked(args, defer.priority, task_id)
if self._current_task:
self._current_task = None
pool._worker_to_pool.notifyAll()
finally:
pool._lock.release()
assert not self._HasRunningTaskUnlocked()
logging.debug("Terminates")
def RunTask(self, *args):
"""Function called to start a task.
This needs to be implemented by child classes.
"""
raise NotImplementedError()
class WorkerPool(object):
"""Worker pool with a queue.
This class is thread-safe.
Tasks are guaranteed to be started in the order in which they're
added to the pool. Due to the nature of threading, they're not
guaranteed to finish in the same order.
@type _tasks: list of tuples
@ivar _tasks: Each tuple has the format (priority, order ID, task ID,
arguments). Priority and order ID are numeric and essentially control the
sort order. The order ID is an increasing number denoting the order in
    which tasks are added to the queue. The task ID is controlled by the user
    of the workerpool, see L{AddTask} for details. The task arguments are C{None} for
abandoned tasks, otherwise a sequence of arguments to be passed to
L{BaseWorker.RunTask}). The list must fulfill the heap property (for use by
the C{heapq} module).
@type _taskdata: dict; (task IDs as keys, tuples as values)
@ivar _taskdata: Mapping from task IDs to entries in L{_tasks}
"""
def __init__(self, name, num_workers, worker_class):
"""Constructor for worker pool.
@param num_workers: number of workers to be started
(dynamic resizing is not yet implemented)
@param worker_class: the class to be instantiated for workers;
should derive from L{BaseWorker}
"""
# Some of these variables are accessed by BaseWorker
self._lock = threading.Lock()
self._pool_to_pool = threading.Condition(self._lock)
self._pool_to_worker = threading.Condition(self._lock)
self._worker_to_pool = threading.Condition(self._lock)
self._worker_class = worker_class
self._name = name
self._last_worker_id = 0
self._workers = []
self._quiescing = False
self._active = True
# Terminating workers
self._termworkers = []
# Queued tasks
self._counter = itertools.count()
self._tasks = []
self._taskdata = {}
# Start workers
self.Resize(num_workers)
# TODO: Implement dynamic resizing?
def _WaitWhileQuiescingUnlocked(self):
"""Wait until the worker pool has finished quiescing.
"""
while self._quiescing:
self._pool_to_pool.wait()
def _AddTaskUnlocked(self, args, priority, task_id):
"""Adds a task to the internal queue.
@type args: sequence
@param args: Arguments passed to L{BaseWorker.RunTask}
@type priority: number
@param priority: Task priority
@param task_id: Task ID
"""
assert isinstance(args, (tuple, list)), "Arguments must be a sequence"
assert isinstance(priority, (int, long)), "Priority must be numeric"
assert task_id is None or isinstance(task_id, (int, long)), \
"Task ID must be numeric or None"
task = [priority, self._counter.next(), task_id, args]
if task_id is not None:
assert task_id not in self._taskdata
# Keep a reference to change priority later if necessary
self._taskdata[task_id] = task
# A counter is used to ensure elements are processed in their incoming
# order. For processing they're sorted by priority and then counter.
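    # For example, entries [0, 1, ...] and [0, 2, ...] both pop before
    # [5, 3, ...], and among equal priorities the lower counter pops first.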
heapq.heappush(self._tasks, task)
# Notify a waiting worker
self._pool_to_worker.notify()
def AddTask(self, args, priority=_DEFAULT_PRIORITY, task_id=None):
"""Adds a task to the queue.
@type args: sequence
@param args: arguments passed to L{BaseWorker.RunTask}
@type priority: number
@param priority: Task priority
@param task_id: Task ID
@note: The task ID can be essentially anything that can be used as a
dictionary key. Callers, however, must ensure a task ID is unique while a
task is in the pool or while it might return to the pool due to deferring
using L{DeferTask}.
"""
self._lock.acquire()
try:
self._WaitWhileQuiescingUnlocked()
self._AddTaskUnlocked(args, priority, task_id)
finally:
self._lock.release()
def AddManyTasks(self, tasks, priority=_DEFAULT_PRIORITY, task_id=None):
"""Add a list of tasks to the queue.
@type tasks: list of tuples
@param tasks: list of args passed to L{BaseWorker.RunTask}
@type priority: number or list of numbers
@param priority: Priority for all added tasks or a list with the priority
for each task
@type task_id: list
@param task_id: List with the ID for each task
@note: See L{AddTask} for a note on task IDs.
"""
assert compat.all(isinstance(task, (tuple, list)) for task in tasks), \
"Each task must be a sequence"
assert (isinstance(priority, (int, long)) or
compat.all(isinstance(prio, (int, long)) for prio in priority)), \
"Priority must be numeric or be a list of numeric values"
assert task_id is None or isinstance(task_id, (tuple, list)), \
"Task IDs must be in a sequence"
if isinstance(priority, (int, long)):
priority = [priority] * len(tasks)
elif len(priority) != len(tasks):
raise errors.ProgrammerError("Number of priorities (%s) doesn't match"
" number of tasks (%s)" %
(len(priority), len(tasks)))
if task_id is None:
task_id = [None] * len(tasks)
elif len(task_id) != len(tasks):
raise errors.ProgrammerError("Number of task IDs (%s) doesn't match"
" number of tasks (%s)" %
(len(task_id), len(tasks)))
self._lock.acquire()
try:
self._WaitWhileQuiescingUnlocked()
assert compat.all(isinstance(prio, (int, long)) for prio in priority)
assert len(tasks) == len(priority)
assert len(tasks) == len(task_id)
for (args, prio, tid) in zip(tasks, priority, task_id):
self._AddTaskUnlocked(args, prio, tid)
finally:
self._lock.release()
def ChangeTaskPriority(self, task_id, priority):
"""Changes a task's priority.
@param task_id: Task ID
@type priority: number
@param priority: New task priority
@raise NoSuchTask: When the task referred by C{task_id} can not be found
(it may never have existed, may have already been processed, or is
currently running)
"""
assert isinstance(priority, (int, long)), "Priority must be numeric"
self._lock.acquire()
try:
logging.debug("About to change priority of task %s to %s",
task_id, priority)
# Find old task
oldtask = self._taskdata.get(task_id, None)
if oldtask is None:
msg = "Task '%s' was not found" % task_id
logging.debug(msg)
raise NoSuchTask(msg)
# Prepare new task
newtask = [priority] + oldtask[1:]
# Mark old entry as abandoned (this doesn't change the sort order and
# therefore doesn't invalidate the heap property of L{self._tasks}).
# See also <http://docs.python.org/library/heapq.html#priority-queue-
# implementation-notes>.
oldtask[-1] = None
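      # The abandoned entry keeps its heap position; _WaitForTaskUnlocked
      # simply skips popped entries whose args are None.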
# Change reference to new task entry and forget the old one
assert task_id is not None
self._taskdata[task_id] = newtask
# Add a new task with the old number and arguments
heapq.heappush(self._tasks, newtask)
# Notify a waiting worker
self._pool_to_worker.notify()
finally:
self._lock.release()
def SetActive(self, active):
"""Enable/disable processing of tasks.
This is different from L{Quiesce} in the sense that this function just
changes an internal flag and doesn't wait for the queue to be empty. Tasks
already being processed continue normally, but no new tasks will be
started. New tasks can still be added.
@type active: bool
@param active: Whether tasks should be processed
"""
self._lock.acquire()
try:
self._active = active
if active:
# Tell all workers to continue processing
self._pool_to_worker.notifyAll()
finally:
self._lock.release()
def _WaitForTaskUnlocked(self, worker):
"""Waits for a task for a worker.
@type worker: L{BaseWorker}
@param worker: Worker thread
"""
while True:
if self._ShouldWorkerTerminateUnlocked(worker):
return _TERMINATE
# If there's a pending task, return it immediately
if self._active and self._tasks:
# Get task from queue and tell pool about it
try:
task = heapq.heappop(self._tasks)
finally:
self._worker_to_pool.notifyAll()
(_, _, task_id, args) = task
# If the priority was changed, "args" is None
if args is None:
# Try again
logging.debug("Found abandoned task (%r)", task)
continue
# Delete reference
if task_id is not None:
del self._taskdata[task_id]
return task
logging.debug("Waiting for tasks")
# wait() releases the lock and sleeps until notified
self._pool_to_worker.wait()
logging.debug("Notified while waiting")
def _ShouldWorkerTerminateUnlocked(self, worker):
"""Returns whether a worker should terminate.
"""
return (worker in self._termworkers)
def _HasRunningTasksUnlocked(self):
"""Checks whether there's a task running in a worker.
"""
for worker in self._workers + self._termworkers:
if worker._HasRunningTaskUnlocked(): # pylint: disable=W0212
return True
return False
def HasRunningTasks(self):
"""Checks whether there's at least one task running.
"""
self._lock.acquire()
try:
return self._HasRunningTasksUnlocked()
finally:
self._lock.release()
def Quiesce(self):
"""Waits until the task queue is empty.
"""
self._lock.acquire()
try:
self._quiescing = True
# Wait while there are tasks pending or running
while self._tasks or self._HasRunningTasksUnlocked():
self._worker_to_pool.wait()
finally:
self._quiescing = False
# Make sure AddTasks continues in case it was waiting
self._pool_to_pool.notifyAll()
self._lock.release()
def _NewWorkerIdUnlocked(self):
"""Return an identifier for a new worker.
"""
self._last_worker_id += 1
return "%s%d" % (self._name, self._last_worker_id)
def _ResizeUnlocked(self, num_workers):
"""Changes the number of workers.
"""
assert num_workers >= 0, "num_workers must be >= 0"
logging.debug("Resizing to %s workers", num_workers)
current_count = len(self._workers)
if current_count == num_workers:
# Nothing to do
pass
elif current_count > num_workers:
if num_workers == 0:
# Create copy of list to iterate over while lock isn't held.
termworkers = self._workers[:]
del self._workers[:]
else:
# TODO: Implement partial downsizing
raise NotImplementedError()
#termworkers = ...
self._termworkers += termworkers
# Notify workers that something has changed
self._pool_to_worker.notifyAll()
# Join all terminating workers
self._lock.release()
try:
for worker in termworkers:
logging.debug("Waiting for thread %s", worker.getName())
worker.join()
finally:
self._lock.acquire()
# Remove terminated threads. This could be done in a more efficient way
# (del self._termworkers[:]), but checking worker.isAlive() makes sure we
# don't leave zombie threads around.
for worker in termworkers:
assert worker in self._termworkers, ("Worker not in list of"
" terminating workers")
if not worker.isAlive():
self._termworkers.remove(worker)
assert not self._termworkers, "Zombie worker detected"
elif current_count < num_workers:
# Create (num_workers - current_count) new workers
for _ in range(num_workers - current_count):
worker = self._worker_class(self, self._NewWorkerIdUnlocked())
self._workers.append(worker)
worker.start()
def Resize(self, num_workers):
"""Changes the number of workers in the pool.
@param num_workers: the new number of workers
"""
self._lock.acquire()
try:
return self._ResizeUnlocked(num_workers)
finally:
self._lock.release()
def TerminateWorkers(self):
"""Terminate all worker threads.
Unstarted tasks will be ignored.
"""
logging.debug("Terminating all workers")
self._lock.acquire()
try:
self._ResizeUnlocked(0)
if self._tasks:
logging.debug("There are %s tasks left", len(self._tasks))
finally:
self._lock.release()
logging.debug("All workers terminated")
| gpl-2.0 |
thiagomg/experiments | math/question.py | 4 | 2911 | import sys
#compatibility
try: input = raw_input
except NameError: pass
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
colors = {
'GREEN': bcolors.OKGREEN,
'BLUE': bcolors.OKBLUE,
'MAGENTA': bcolors.HEADER,
'PURPLE': bcolors.HEADER,
'YELLOW': bcolors.WARNING,
'RED': bcolors.FAIL,
'NONE': bcolors.ENDC
}
attribs = {
'BOLD' : bcolors.BOLD,
'UNDERLINE': bcolors.UNDERLINE,
}
exit_cond = lambda x: x in {'q', 'quit', 'leave', 'exit'}
def set_exit_cond(condition):
global exit_cond
exit_cond = condition
def get_char(s, char_list):
while( True ):
string = input(s)
if exit_cond(string):
return None
if string in char_list:
return string
def get_number(s, max_val=None):
while( True ):
try:
string = input(s)
if exit_cond(string):
return None
val = int(string)
if max_val is None or val <= max_val:
return val
except:
print ('Not a number. Try again')
def get_string(s):
string = input(s)
if exit_cond(string):
return None
return string
def get_word(s):
string = input(s)
if exit_cond(string):
return False
return True
def ask_addition_question(m, n):
for i in range(1, 4):
result = get_number(str(m) + ' + ' + str(n) + ' = ')
if result == None:
return -1
if result == (m+n):
print ('Correct !')
return 1
else:
print ('Wrong. try again!')
return 0
def ask_multiplication_question(m, n):
for i in range(1, 4):
result = get_number(str(m) + ' x ' + str(n) + ' = ')
if result == None:
return -1
if result == (m*n):
print ('Correct !')
return 1
else:
print ('Wrong. try again!')
return 0
def ask_subtraction_question(m, n):
for i in range(1, 4):
if m < n:
m, n = n, m
result = get_number(str(m) + ' - ' + str(n) + ' = ')
if result == None:
return -1
if result == (m-n):
print ('Correct !')
return 1
else:
print ('Wrong. try again!')
return 0
def ask_word_question(word):
return get_word(' ' + word + ' ')
def write(text, color=None, *attrib):
prefix = ''
sufix = ''
if not color is None:
prefix += colors[color.upper()]
for at in attrib:
prefix += attribs[at.upper()]
if len(prefix) > 0:
sufix = colors['NONE']
print (prefix + text + sufix)
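# Minimal usage sketch (illustrative only): prints a bold green line using the
# write() helper above.
if __name__ == '__main__':
    write('2 + 2 = 4', 'green', 'bold')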
| mit |
info-labs/owlbot | owlbot/tests/test_secrets.py | 1 | 4149 | """Test the secrets module.
As most of the functions in secrets are thin wrappers around functions
defined elsewhere, we don't need to test them exhaustively.
"""
from ..pep506 import secrets
import unittest
import string
# For Python 2/3 compatibility.
try:
unicode
except NameError:
# Python 3.
unicode = str
# === Unit tests ===
class Compare_Digest_Tests(unittest.TestCase):
"""Test secrets.compare_digest function."""
def test_equal(self):
# Test compare_digest functionality with equal strings.
for s in ("a", "bcd", "xyz123"):
a = s*100
b = s*100
self.assertTrue(secrets.compare_digest(a, b))
def test_unequal(self):
# Test compare_digest functionality with unequal strings.
self.assertFalse(secrets.compare_digest("abc", "abcd"))
for s in ("x", "mn", "a1b2c3"):
a = s*100 + "q"
b = s*100 + "k"
self.assertFalse(secrets.compare_digest(a, b))
def test_bad_types(self):
# Test that compare_digest raises with mixed types.
a = "abcde" # str in Python3, bytes in Python2.
a = a.encode('ascii')
assert isinstance(a, bytes)
b = a.decode('ascii')
assert isinstance(b, unicode)
self.assertRaises(TypeError, secrets.compare_digest, a, b)
self.assertRaises(TypeError, secrets.compare_digest, b, a)
def test_bool(self):
# Test that compare_digest returns a bool.
self.assertTrue(isinstance(secrets.compare_digest("abc", "abc"), bool))
self.assertTrue(isinstance(secrets.compare_digest("abc", "xyz"), bool))
class Random_Tests(unittest.TestCase):
"""Test wrappers around SystemRandom methods."""
def test_randbits(self):
# Test randbits.
errmsg = "randbits(%d) returned %d"
for numbits in (3, 12, 30):
for i in range(6):
n = secrets.randbits(numbits)
self.assertTrue(0 <= n < 2**numbits, errmsg % (numbits, n))
def test_choice(self):
# Test choice.
items = [1, 2, 4, 8, 16, 32, 64]
for i in range(10):
self.assertTrue(secrets.choice(items) in items)
def test_randbelow(self):
# Test randbelow.
errmsg = "randbelow(%d) returned %d"
for i in range(2, 10):
n = secrets.randbelow(i)
self.assertTrue(n in range(i), errmsg % (i, n))
self.assertRaises(ValueError, secrets.randbelow, 0)
class Token_Tests(unittest.TestCase):
"""Test token functions."""
def test_token_defaults(self):
# Test that token_* functions handle default size correctly.
for func in (secrets.token_bytes, secrets.token_hex,
secrets.token_urlsafe):
name = func.__name__
try:
func()
except TypeError:
self.fail("%s cannot be called with no argument" % name)
try:
func(None)
except TypeError:
self.fail("%s cannot be called with None" % name)
size = secrets.DEFAULT_ENTROPY
self.assertEqual(len(secrets.token_bytes(None)), size)
self.assertEqual(len(secrets.token_hex(None)), 2*size)
def test_token_bytes(self):
# Test token_bytes.
self.assertTrue(isinstance(secrets.token_bytes(11), bytes))
for n in (1, 8, 17, 100):
self.assertEqual(len(secrets.token_bytes(n)), n)
def test_token_hex(self):
# Test token_hex.
self.assertTrue(isinstance(secrets.token_hex(7), unicode))
for n in (1, 12, 25, 90):
s = secrets.token_hex(n)
self.assertEqual(len(s), 2*n)
self.assertTrue(all(c in string.hexdigits for c in s))
def test_token_urlsafe(self):
# Test token_urlsafe.
self.assertTrue(isinstance(secrets.token_urlsafe(9), unicode))
legal = string.ascii_letters + string.digits + '-_'
for n in (1, 11, 28, 76):
self.assertTrue(all(c in legal for c in secrets.token_urlsafe(n)))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
vijaylbais/boto | boto/rds/statusinfo.py | 180 | 2011 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class StatusInfo(object):
"""
Describes a status message.
"""
def __init__(self, status_type=None, normal=None, status=None, message=None):
self.status_type = status_type
self.normal = normal
self.status = status
self.message = message
def __repr__(self):
return 'StatusInfo:%s' % self.message
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'StatusType':
self.status_type = value
elif name == 'Normal':
if value.lower() == 'true':
self.normal = True
else:
self.normal = False
elif name == 'Status':
self.status = value
elif name == 'Message':
self.message = value
else:
setattr(self, name, value)
| mit |
nE0sIghT/pcsx2 | 3rdparty/wxwidgets3.0/src/msw/wince/clean_vcp.py | 45 | 1324 | '''
This script will delete dependencies from *.vcp files.
After using this script, the next time you try to save the project you
will have to wait until 'Visual Tools' rebuilds all dependencies, and
this process might take a HUGE amount of time.
Author : Viktor Voroshylo
'''
__version__='$Revision$'[11:-2]
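# Illustrative example (made-up file names) of the kind of block this script
# strips: a line such as 'DEP_CPP_FOO=\' together with all of its continuation
# lines (e.g. '    "..\\include\\wx\\defs.h"\') is dropped from the project file.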
import sys
if len(sys.argv) != 2 :
print "Usage: %s project_file.vcp" % sys.argv[0]
sys.exit(0)
vsp_filename = sys.argv[1]
exclude_line = 0
resultLines = []
vsp_file = open(vsp_filename, "r")
empty_if_start = -1
line = vsp_file.readline()
while line :
skip_line = 0
if exclude_line :
if not line.endswith("\\\n") : exclude_line = 0
skip_line = 1
elif line.startswith("DEP_CPP_") or line.startswith("NODEP_CPP_") :
exclude_line = 1
skip_line = 1
elif empty_if_start != -1 :
if line == "!ENDIF \n" :
resultLines = resultLines[:empty_if_start]
empty_if_start = -1
skip_line = 1
elif line != "\n" and not line.startswith("!ELSEIF ") :
empty_if_start = -1
elif line.startswith("!IF ") :
empty_if_start = len(resultLines)
if not skip_line :
resultLines.append(line)
line = vsp_file.readline()
open(vsp_filename, "w").write("".join(resultLines))
| gpl-2.0 |
alaski/nova | nova/tests/unit/objects/test_hv_spec.py | 46 | 2308 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import vm_mode
from nova import objects
from nova.tests.unit.objects import test_objects
spec_dict = {
'arch': arch.I686,
'hv_type': hv_type.KVM,
'vm_mode': vm_mode.HVM
}
spec_list = [
arch.I686,
hv_type.KVM,
vm_mode.HVM
]
spec_dict_vz = {
'arch': arch.I686,
'hv_type': hv_type.VIRTUOZZO,
'vm_mode': vm_mode.HVM
}
spec_dict_parallels = {
'arch': arch.I686,
'hv_type': hv_type.PARALLELS,
'vm_mode': vm_mode.HVM
}
class _TestHVSpecObject(object):
def test_hv_spec_from_list(self):
spec_obj = objects.HVSpec.from_list(spec_list)
self.compare_obj(spec_obj, spec_dict)
def test_hv_spec_to_list(self):
spec_obj = objects.HVSpec()
spec_obj.arch = arch.I686
spec_obj.hv_type = hv_type.KVM
spec_obj.vm_mode = vm_mode.HVM
spec = spec_obj.to_list()
self.assertEqual(spec_list, spec)
def test_hv_spec_obj_make_compatible(self):
spec_dict_vz_copy = spec_dict_vz.copy()
# check 1.1->1.0 compatibility
objects.HVSpec().obj_make_compatible(spec_dict_vz_copy, '1.0')
self.assertEqual(spec_dict_parallels, spec_dict_vz_copy)
# check that nothing changed
objects.HVSpec().obj_make_compatible(spec_dict_vz_copy, '1.1')
self.assertEqual(spec_dict_parallels, spec_dict_vz_copy)
class TestHVSpecObject(test_objects._LocalTest,
_TestHVSpecObject):
pass
class TestRemoteHVSpecObject(test_objects._RemoteTest,
_TestHVSpecObject):
pass
| apache-2.0 |
snbueno/blivet | blivet/devices/disk.py | 2 | 21440 | # devices/disk.py
# Classes to represent various types of disk-like devices.
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <[email protected]>
#
import os
from gi.repository import BlockDev as blockdev
from .. import errors
from .. import util
from ..flags import flags
from ..storage_log import log_method_call
from .. import udev
from ..size import Size
from ..fcoe import fcoe
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from .container import ContainerDevice
from .network import NetworkStorageDevice
from .dm import DMDevice
class DiskDevice(StorageDevice):
""" A local/generic disk.
This is not the only kind of device that is treated as a disk. More
useful than checking isinstance(device, DiskDevice) is checking
device.isDisk.
"""
_type = "disk"
_partitionable = True
_isDisk = True
def __init__(self, name, fmt=None,
size=None, major=None, minor=None, sysfsPath='',
parents=None, serial=None, vendor="", model="", bus="",
exists=True):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword uuid: universally unique identifier (device -- not fs)
:type uuid: str
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword removable: whether or not this is a removable device
:type removable: bool
:keyword serial: the ID_SERIAL_RAW, ID_SERIAL or ID_SERIAL_SHORT for
this device (which one is available)
:type serial: str
:keyword vendor: the manufacturer of this Device
:type vendor: str
:keyword model: manufacturer's device model string
:type model: str
:keyword bus: the interconnect this device uses
:type bus: str
DiskDevices always exist.
"""
StorageDevice.__init__(self, name, fmt=fmt, size=size,
major=major, minor=minor, exists=exists,
sysfsPath=sysfsPath, parents=parents,
serial=serial, model=model,
vendor=vendor, bus=bus)
def __repr__(self):
s = StorageDevice.__repr__(self)
s += (" removable = %(removable)s partedDevice = %(partedDevice)r" %
{"removable": self.removable, "partedDevice": self.partedDevice})
return s
@property
def mediaPresent(self):
if flags.testing:
return True
if not self.partedDevice:
return False
# Some drivers (cpqarray <blegh>) make block device nodes for
# controllers with no disks attached and then report a 0 size,
# treat this as no media present
return Size(self.partedDevice.getLength(unit="B")) != Size(0)
@property
def description(self):
return self.model
@property
def size(self):
""" The disk's size """
return super(DiskDevice, self).size
def _preDestroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
if not self.mediaPresent:
raise errors.DeviceError("cannot destroy disk with no media", self.name)
StorageDevice._preDestroy(self)
class DiskFile(DiskDevice):
""" This is a file that we will pretend is a disk.
This is intended only for testing purposes. The benefit of this class
is that you can instantiate a disk-like device with a working disklabel
class as a non-root user. It is not known how the system will behave if
partitions are committed to one of these disks.
"""
_devDir = ""
def __init__(self, name, fmt=None,
size=None, major=None, minor=None, sysfsPath='',
parents=None, serial=None, vendor="", model="", bus="",
exists=True):
"""
:param str name: the full path to the backing regular file
:keyword :class:`~.formats.DeviceFormat` fmt: the device's format
"""
_name = os.path.basename(name)
self._devDir = os.path.dirname(name)
super(DiskFile, self).__init__(_name, fmt=fmt, size=size,
major=major, minor=minor, sysfsPath=sysfsPath,
parents=parents, serial=serial, vendor=vendor,
model=model, bus=bus, exists=exists)
#
# Regular files do not have sysfs entries.
#
@property
def sysfsPath(self):
return ""
@sysfsPath.setter
def sysfsPath(self, value):
pass
def updateSysfsPath(self):
pass
class DMRaidArrayDevice(DMDevice, ContainerDevice):
""" A dmraid (device-mapper RAID) device """
_type = "dm-raid array"
_packages = ["dmraid"]
_partitionable = True
_isDisk = True
_formatClassName = property(lambda s: "dmraidmember")
_formatUUIDAttr = property(lambda s: None)
def __init__(self, name, fmt=None,
size=None, parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
DMRaidArrayDevices always exist. Blivet cannot create or destroy
them.
"""
super(DMRaidArrayDevice, self).__init__(name, fmt=fmt, size=size,
parents=parents, exists=True,
sysfsPath=sysfsPath)
@property
def devices(self):
""" Return a list of this array's member device instances. """
return self.parents
def deactivate(self):
""" Deactivate the raid set. """
log_method_call(self, self.name, status=self.status)
# This call already checks if the set is not active.
blockdev.dm_deactivate_raid_set(self.name)
def activate(self):
""" Activate the raid set. """
log_method_call(self, self.name, status=self.status)
# This call already checks if the set is active.
blockdev.dm_activate_raid_set(self.name)
udev.settle()
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
self.activate()
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
if not self._preTeardown(recursive=recursive):
return
log.debug("not tearing down dmraid device %s", self.name)
def _add(self, member):
raise NotImplementedError()
def _remove(self, member):
raise NotImplementedError()
@property
def description(self):
return "BIOS RAID set (%s)" % blockdev.dm_get_raid_set_type(self.name)
@property
def model(self):
return self.description
def dracutSetupArgs(self):
return set(["rd.dm.uuid=%s" % self.name])
class MultipathDevice(DMDevice):
""" A multipath device """
_type = "dm-multipath"
_packages = ["device-mapper-multipath"]
_partitionable = True
_isDisk = True
def __init__(self, name, fmt=None, size=None, serial=None,
parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword serial: the device's serial number
:type serial: str
MultipathDevices always exist. Blivet cannot create or destroy
them.
"""
DMDevice.__init__(self, name, fmt=fmt, size=size,
parents=parents, sysfsPath=sysfsPath,
exists=True)
self.identity = serial
self.config = {
'wwid' : self.identity,
'mode' : '0600',
'uid' : '0',
'gid' : '0',
}
@property
def wwid(self):
identity = self.identity
ret = []
while identity:
ret.append(identity[:2])
identity = identity[2:]
return ":".join(ret)
@property
def model(self):
if not self.parents:
return ""
return self.parents[0].model
@property
def vendor(self):
if not self.parents:
return ""
return self.parents[0].vendor
@property
def description(self):
return "WWID %s" % (self.wwid,)
def addParent(self, parent):
""" Add a parent device to the mpath. """
log_method_call(self, self.name, status=self.status)
if self.status:
self.teardown()
self.parents.append(parent)
self.setup()
else:
self.parents.append(parent)
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
udev.settle()
rc = util.run_program(["multipath", self.name])
if rc:
raise errors.MPathError("multipath activation failed for '%s'" %
self.name, hardware_fault=True)
def _postSetup(self):
StorageDevice._postSetup(self)
self.setupPartitions()
udev.settle()
class iScsiDiskDevice(DiskDevice, NetworkStorageDevice):
""" An iSCSI disk. """
_type = "iscsi"
_packages = ["iscsi-initiator-utils", "dracut-network"]
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword node: ???
:type node: str
:keyword ibft: use iBFT
:type ibft: bool
:keyword nic: name of NIC to use
:type nic: str
:keyword initiator: initiator name
:type initiator: str
:keyword fw_name: qla4xxx partial offload
:keyword fw_address: qla4xxx partial offload
:keyword fw_port: qla4xxx partial offload
"""
self.node = kwargs.pop("node")
self.ibft = kwargs.pop("ibft")
self.nic = kwargs.pop("nic")
self.initiator = kwargs.pop("initiator")
if self.node is None:
# qla4xxx partial offload
name = kwargs.pop("fw_name")
address = kwargs.pop("fw_address")
port = kwargs.pop("fw_port")
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self,
host_address=address,
nic=self.nic)
log.debug("created new iscsi disk %s %s:%s using fw initiator %s",
name, address, port, self.initiator)
else:
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self, host_address=self.node.address,
nic=self.nic)
log.debug("created new iscsi disk %s %s:%d via %s:%s", self.node.name,
self.node.address,
self.node.port,
self.node.iface,
self.nic)
def dracutSetupArgs(self):
if self.ibft:
return set(["iscsi_firmware"])
# qla4xxx partial offload
if self.node is None:
return set()
address = self.node.address
# surround ipv6 addresses with []
if ":" in address:
address = "[%s]" % address
netroot="netroot=iscsi:"
auth = self.node.getAuth()
if auth:
netroot += "%s:%s" % (auth.username, auth.password)
if len(auth.reverse_username) or len(auth.reverse_password):
netroot += ":%s:%s" % (auth.reverse_username,
auth.reverse_password)
iface_spec = ""
if self.nic != "default":
iface_spec = ":%s:%s" % (self.node.iface, self.nic)
netroot += "@%s::%d%s::%s" % (address,
self.node.port,
iface_spec,
self.node.name)
initiator = "iscsi_initiator=%s" % self.initiator
return set([netroot, initiator])
class FcoeDiskDevice(DiskDevice, NetworkStorageDevice):
""" An FCoE disk. """
_type = "fcoe"
_packages = ["fcoe-utils", "dracut-network"]
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword nic: name of NIC to use
:keyword identifier: ???
"""
self.nic = kwargs.pop("nic")
self.identifier = kwargs.pop("identifier")
DiskDevice.__init__(self, device, **kwargs)
NetworkStorageDevice.__init__(self, nic=self.nic)
log.debug("created new fcoe disk %s (%s) @ %s",
device, self.identifier, self.nic)
def dracutSetupArgs(self):
dcb = True
for nic, dcb, _auto_vlan in fcoe().nics:
if nic == self.nic:
break
else:
return set()
if dcb:
dcbOpt = "dcb"
else:
dcbOpt = "nodcb"
if self.nic in fcoe().added_nics:
return set(["fcoe=%s:%s" % (self.nic, dcbOpt)])
else:
return set(["fcoe=edd:%s" % dcbOpt])
class ZFCPDiskDevice(DiskDevice):
""" A mainframe ZFCP disk. """
_type = "zfcp"
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword hba_id: ???
:keyword wwpn: ???
:keyword fcp_lun: ???
"""
self.hba_id = kwargs.pop("hba_id")
self.wwpn = kwargs.pop("wwpn")
self.fcp_lun = kwargs.pop("fcp_lun")
DiskDevice.__init__(self, device, **kwargs)
def __repr__(self):
s = DiskDevice.__repr__(self)
s += (" hba_id = %(hba_id)s wwpn = %(wwpn)s fcp_lun = %(fcp_lun)s" %
{"hba_id": self.hba_id,
"wwpn": self.wwpn,
"fcp_lun": self.fcp_lun})
return s
@property
def description(self):
return "FCP device %(device)s with WWPN %(wwpn)s and LUN %(lun)s" \
% {'device': self.hba_id,
'wwpn': self.wwpn,
'lun': self.fcp_lun}
def dracutSetupArgs(self):
return set(["rd.zfcp=%s,%s,%s" % (self.hba_id, self.wwpn, self.fcp_lun,)])
class DASDDevice(DiskDevice):
""" A mainframe DASD. """
_type = "dasd"
def __init__(self, device, **kwargs):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword format: this device's formatting
:type format: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword busid: bus ID
:keyword opts: options
:type opts: dict with option name keys and option value values
"""
self.busid = kwargs.pop('busid')
self.opts = kwargs.pop('opts')
DiskDevice.__init__(self, device, **kwargs)
@property
def description(self):
return "DASD device %s" % self.busid
def getOpts(self):
return ["%s=%s" % (k, v) for k, v in self.opts.items() if v == '1']
def dracutSetupArgs(self):
conf = "/etc/dasd.conf"
line = None
if os.path.isfile(conf):
f = open(conf)
# grab the first line that starts with our busID
for l in f.readlines():
if l.startswith(self.busid):
line = l.rstrip()
break
f.close()
# See if we got a line. If not, grab our getOpts
if not line:
line = self.busid
for devopt in self.getOpts():
line += " %s" % devopt
# Create a translation mapping from dasd.conf format to module format
translate = {'use_diag': 'diag',
'readonly': 'ro',
'erplog': 'erplog',
'failfast': 'failfast'}
# this is a really awkward way of determining if the
# feature found is actually desired (1, not 0), plus
# translating that feature into the actual kernel module
# value
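        # e.g. (illustrative values) a dasd.conf line of
        # "0.0.0200 use_diag=0 readonly=1" yields opts == ["ro"] and a dracut
        # argument of "rd.dasd=0.0.0200(ro)".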
opts = []
parts = line.split()
for chunk in parts[1:]:
try:
feat, val = chunk.split('=')
if int(val):
opts.append(translate[feat])
except (ValueError, KeyError):
# If we don't know what the feature is (feat not in translate
# or if we get a val that doesn't cleanly convert to an int
# we can't do anything with it.
log.warning("failed to parse dasd feature %s", chunk)
if opts:
return set(["rd.dasd=%s(%s)" % (self.busid,
":".join(opts))])
else:
return set(["rd.dasd=%s" % self.busid])
| gpl-2.0 |
WQuanfeng/wagtail | wagtail/contrib/wagtailapi/utils.py | 13 | 1299 | from django.conf import settings
from django.utils.six.moves.urllib.parse import urlparse
from wagtail.wagtailcore.models import Page
class BadRequestError(Exception):
pass
class URLPath(object):
"""
This class represents a URL path that should be converted to a full URL.
It is used when the domain that should be used is not known at the time
the URL was generated. It will get resolved to a full URL during
serialisation in api.py.
One example use case is the documents endpoint adding download URLs into
the JSON. The endpoint does not know the domain name to use at the time so
returns one of these instead.
"""
def __init__(self, path):
self.path = path
class ObjectDetailURL(object):
def __init__(self, model, pk):
self.model = model
self.pk = pk
def get_base_url(request=None):
base_url = getattr(settings, 'WAGTAILAPI_BASE_URL', request.site.root_url if request else None)
if base_url:
# We only want the scheme and netloc
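        # e.g. a base_url of "https://example.com/blog/" becomes "https://example.com"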
base_url_parsed = urlparse(base_url)
return base_url_parsed.scheme + '://' + base_url_parsed.netloc
def pages_for_site(site):
pages = Page.objects.public().live()
pages = pages.descendant_of(site.root_page, inclusive=True)
return pages
| bsd-3-clause |
ruiting/opencog | opencog/python/pln_old/examples/deduction/deduction_example.py | 32 | 2889 | """
PLN Deduction Example
Demonstrates how to run the example in deduction_agent.py
when interacting with PLN from a standalone Python environment
for development or testing purposes. The normal use case is to
run the example from the CogServer, for which you should use
deduction_agent.py instead.
"""
from __future__ import print_function
from pprint import pprint
from pln.examples.deduction import deduction_agent
from opencog.atomspace import types, AtomSpace, TruthValue
__author__ = 'Cosmo Harrigan'
# Create an AtomSpace with some sample information, equivalent to the
# information in atomspace_contents.scm
atomspace = AtomSpace()
# Basic concepts
frog = atomspace.add_node(types.ConceptNode, 'Frog', TruthValue(0.01, 100))
intelligent = atomspace.add_node(types.ConceptNode,
'Intelligent',
TruthValue(0.05, 100))
slimy = atomspace.add_node(types.ConceptNode, 'Slimy', TruthValue(0.01, 100))
animal = atomspace.add_node(types.ConceptNode, 'Animal', TruthValue(0.1, 100))
being = atomspace.add_node(types.ConceptNode, 'Being', TruthValue(0.1, 100))
moves = atomspace.add_node(types.PredicateNode, 'Moves', TruthValue(0.1, 100))
# Attributes of frogs
atomspace.add_link(types.InheritanceLink,
[frog, intelligent],
TruthValue(0.2, 100))
atomspace.add_link(types.InheritanceLink, [frog, slimy], TruthValue(0.5, 100))
atomspace.add_link(types.InheritanceLink, [frog, animal], TruthValue(0.9, 100))
# Attributes of animals
atomspace.add_link(types.InheritanceLink,
[animal, being],
TruthValue(0.9, 100))
atomspace.add_link(types.InheritanceLink,
[animal, moves],
TruthValue(0.9, 100))
# Peter is a frog
peter = atomspace.add_node(types.ConceptNode, 'Peter', TruthValue(0.001, 100))
atomspace.add_link(types.InheritanceLink, [peter, frog], TruthValue(0.9, 100))
#print('AtomSpace starting contents:')
#atomspace.print_list()
# Test multiple steps of forward inference on the AtomSpace
deduction_agent = deduction_agent.DeductionAgent()
for i in range(1, 500):
result = deduction_agent.run(atomspace)
output = None
input = None
rule = None
if result is not None:
(rule, input, output) = result
if output is not None:
print("\n---- [Step # {0}] ----".format(i))
print("-- Output:\n{0}".format(output[0]))
print("-- Rule:\n{0}".format(rule))
print("\n-- Input:\n{0}".format(input))
print('--- History:')
history = deduction_agent.get_history()
pprint(history)
with open('pln_log.txt', 'w') as logfile:
all_atoms = atomspace.get_atoms_by_type(t=types.Atom)
print('; Number of atoms in atomspace after inference: %d' %
len(all_atoms), file=logfile)
for atom in all_atoms:
print(atom, file=logfile)
| agpl-3.0 |
kohnle-lernmodule/exeLearningPlus1_04 | twisted/pb/test/test_pb.py | 14 | 42742 |
import gc
import sys, re
from twisted.python import log
#log.startLogging(sys.stderr)
from zope.interface import implements, implementsOnly, implementedBy
from twisted.python import components, failure, reflect
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.internet.main import CONNECTION_LOST
from twisted.application.internet import TCPServer
from twisted.pb import schema, pb, tokens, remoteinterface, referenceable
from twisted.pb.tokens import BananaError, Violation, INT, STRING, OPEN
from twisted.pb.slicer import BananaFailure
from twisted.pb import copyable, broker, call
from twisted.pb.remoteinterface import getRemoteInterface
from twisted.pb.remoteinterface import RemoteInterfaceRegistry
try:
from twisted.pb import crypto
except ImportError:
crypto = None
if crypto and not crypto.available:
crypto = None
from twisted.pb.test.common import HelperTarget, RIHelper, TargetMixin
from twisted.pb.test.common import getRemoteInterfaceName
from twisted.pb.negotiate import eventually, flushEventualQueue
class TestRequest(call.PendingRequest):
def __init__(self, reqID, rref=None):
self.answers = []
call.PendingRequest.__init__(self, reqID, rref)
def complete(self, res):
self.answers.append((True, res))
def fail(self, why):
self.answers.append((False, why))
class TestReferenceUnslicer(unittest.TestCase):
# OPEN(reference), INT(refid), [STR(interfacename), INT(version)]... CLOSE
def setUp(self):
self.broker = broker.Broker()
def newUnslicer(self):
unslicer = referenceable.ReferenceUnslicer()
unslicer.broker = self.broker
unslicer.opener = self.broker.rootUnslicer
return unslicer
def testReject(self):
u = self.newUnslicer()
self.failUnlessRaises(BananaError, u.checkToken, STRING, 10)
u = self.newUnslicer()
self.failUnlessRaises(BananaError, u.checkToken, OPEN, 0)
def testNoInterfaces(self):
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
rr1,rr1d = u.receiveClose()
self.failUnless(rr1d is None)
rr2 = self.broker.getTrackerForYourReference(12).getRef()
self.failUnless(rr2)
self.failUnless(isinstance(rr2, referenceable.RemoteReference))
self.failUnlessEqual(rr2.tracker.broker, self.broker)
self.failUnlessEqual(rr2.tracker.clid, 12)
self.failUnlessEqual(rr2.tracker.interfaceName, None)
def testInterfaces(self):
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
u.receiveChild("IBar")
rr1,rr1d = u.receiveClose()
self.failUnless(rr1d is None)
rr2 = self.broker.getTrackerForYourReference(12).getRef()
self.failUnless(rr2)
self.failUnlessIdentical(rr1, rr2)
self.failUnless(isinstance(rr2, referenceable.RemoteReference))
self.failUnlessEqual(rr2.tracker.broker, self.broker)
self.failUnlessEqual(rr2.tracker.clid, 12)
self.failUnlessEqual(rr2.tracker.interfaceName, "IBar")
class TestAnswer(unittest.TestCase):
# OPEN(answer), INT(reqID), [answer], CLOSE
def setUp(self):
self.broker = broker.Broker()
def newUnslicer(self):
unslicer = call.AnswerUnslicer()
unslicer.broker = self.broker
unslicer.opener = self.broker.rootUnslicer
unslicer.protocol = self.broker
return unslicer
def makeRequest(self):
req = call.PendingRequest(defer.Deferred())
def testAccept1(self):
req = TestRequest(12)
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12) # causes broker.getRequest
u.checkToken(STRING, 8)
u.receiveChild("results")
self.failIf(req.answers)
u.receiveClose() # causes broker.gotAnswer
self.failUnlessEqual(req.answers, [(True, "results")])
def testAccept2(self):
req = TestRequest(12)
req.setConstraint(schema.makeConstraint(str))
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12) # causes broker.getRequest
u.checkToken(STRING, 15)
u.receiveChild("results")
self.failIf(req.answers)
u.receiveClose() # causes broker.gotAnswer
self.failUnlessEqual(req.answers, [(True, "results")])
def testReject1(self):
# answer a non-existent request
req = TestRequest(12)
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
self.failUnlessRaises(Violation, u.receiveChild, 13)
def testReject2(self):
# answer a request with a result that violates the constraint
req = TestRequest(12)
req.setConstraint(schema.makeConstraint(int))
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
self.failUnlessRaises(Violation, u.checkToken, STRING, 42)
# this does not yet errback the request
self.failIf(req.answers)
# it gets errbacked when banana reports the violation
v = Violation("icky")
v.setLocation("here")
u.reportViolation(BananaFailure(v))
self.failUnlessEqual(len(req.answers), 1)
err = req.answers[0]
self.failIf(err[0])
f = err[1]
self.failUnless(f.check(Violation))
class RIMyTarget(pb.RemoteInterface):
# method constraints can be declared directly:
add1 = schema.RemoteMethodSchema(_response=int, a=int, b=int)
# or through their function definitions:
def add(a=int, b=int): return int
#add = schema.callable(add) # the metaclass makes this unnecessary
# but it could be used for adding options or something
def join(a=str, b=str, c=int): return str
def getName(): return str
disputed = schema.RemoteMethodSchema(_response=int, a=int)
class RIMyTarget2(pb.RemoteInterface):
__remote_name__ = "RIMyTargetInterface2"
sub = schema.RemoteMethodSchema(_response=int, a=int, b=int)
# For some tests, we want the two sides of the connection to disagree about
# the contents of the RemoteInterface they are using. This is remarkably
# difficult to accomplish within a single process. We do it by creating
# something that behaves just barely enough like a RemoteInterface to work.
class FakeTarget(dict):
pass
RIMyTarget3 = FakeTarget()
RIMyTarget3.__remote_name__ = RIMyTarget.__remote_name__
RIMyTarget3['disputed'] = schema.RemoteMethodSchema(_response=int, a=str)
RIMyTarget3['disputed'].name = "disputed"
RIMyTarget3['disputed'].interface = RIMyTarget3
RIMyTarget3['disputed2'] = schema.RemoteMethodSchema(_response=str, a=int)
RIMyTarget3['disputed2'].name = "disputed"
RIMyTarget3['disputed2'].interface = RIMyTarget3
RIMyTarget3['sub'] = schema.RemoteMethodSchema(_response=int, a=int, b=int)
RIMyTarget3['sub'].name = "sub"
RIMyTarget3['sub'].interface = RIMyTarget3
class Target(pb.Referenceable):
implements(RIMyTarget)
def __init__(self, name=None):
self.calls = []
self.name = name
def getMethodSchema(self, methodname):
return None
def remote_add(self, a, b):
self.calls.append((a,b))
return a+b
remote_add1 = remote_add
def remote_getName(self):
return self.name
def remote_disputed(self, a):
return 24
def remote_fail(self):
raise ValueError("you asked me to fail")
class TargetWithoutInterfaces(Target):
# undeclare the RIMyTarget interface
implementsOnly(implementedBy(pb.Referenceable))
class BrokenTarget(pb.Referenceable):
implements(RIMyTarget)
def remote_add(self, a, b):
return "error"
class IFoo(components.Interface):
# non-remote Interface
pass
class Target2(Target):
implementsOnly(IFoo, RIMyTarget2)
class TestInterface(TargetMixin, unittest.TestCase):
def testTypes(self):
self.failUnless(isinstance(RIMyTarget,
remoteinterface.RemoteInterfaceClass))
self.failUnless(isinstance(RIMyTarget2,
remoteinterface.RemoteInterfaceClass))
def testRegister(self):
reg = RemoteInterfaceRegistry
self.failUnlessEqual(reg["RIMyTarget"], RIMyTarget)
self.failUnlessEqual(reg["RIMyTargetInterface2"], RIMyTarget2)
def testDuplicateRegistry(self):
try:
class RIMyTarget(pb.RemoteInterface):
def foo(bar=int): return int
except remoteinterface.DuplicateRemoteInterfaceError:
pass
else:
self.fail("duplicate registration not caught")
def testInterface1(self):
# verify that we extract the right interfaces from a local object.
# also check that the registry stuff works.
self.setupBrokers()
rr, target = self.setupTarget(Target())
iface = getRemoteInterface(target)
self.failUnlessEqual(iface, RIMyTarget)
iname = getRemoteInterfaceName(target)
self.failUnlessEqual(iname, "RIMyTarget")
self.failUnlessIdentical(RemoteInterfaceRegistry["RIMyTarget"],
RIMyTarget)
rr, target = self.setupTarget(Target2())
iname = getRemoteInterfaceName(target)
self.failUnlessEqual(iname, "RIMyTargetInterface2")
self.failUnlessIdentical(\
RemoteInterfaceRegistry["RIMyTargetInterface2"], RIMyTarget2)
def testInterface2(self):
# verify that RemoteInterfaces have the right attributes
t = Target()
iface = getRemoteInterface(t)
self.failUnlessEqual(iface, RIMyTarget)
# 'add' is defined with 'def'
s1 = RIMyTarget['add']
self.failUnless(isinstance(s1, schema.RemoteMethodSchema))
ok, s2 = s1.getArgConstraint("a")
self.failUnless(ok)
self.failUnless(isinstance(s2, schema.IntegerConstraint))
self.failUnless(s2.checkObject(12) == None)
self.failUnlessRaises(schema.Violation, s2.checkObject, "string")
s3 = s1.getResponseConstraint()
self.failUnless(isinstance(s3, schema.IntegerConstraint))
# 'add1' is defined as a class attribute
s1 = RIMyTarget['add1']
self.failUnless(isinstance(s1, schema.RemoteMethodSchema))
ok, s2 = s1.getArgConstraint("a")
self.failUnless(ok)
self.failUnless(isinstance(s2, schema.IntegerConstraint))
self.failUnless(s2.checkObject(12) == None)
self.failUnlessRaises(schema.Violation, s2.checkObject, "string")
s3 = s1.getResponseConstraint()
self.failUnless(isinstance(s3, schema.IntegerConstraint))
s1 = RIMyTarget['join']
self.failUnless(isinstance(s1.getArgConstraint("a")[1],
schema.StringConstraint))
self.failUnless(isinstance(s1.getArgConstraint("c")[1],
schema.IntegerConstraint))
s3 = RIMyTarget['join'].getResponseConstraint()
self.failUnless(isinstance(s3, schema.StringConstraint))
s1 = RIMyTarget['disputed']
self.failUnless(isinstance(s1.getArgConstraint("a")[1],
schema.IntegerConstraint))
s3 = s1.getResponseConstraint()
self.failUnless(isinstance(s3, schema.IntegerConstraint))
def testInterface3(self):
t = TargetWithoutInterfaces()
iface = getRemoteInterface(t)
self.failIf(iface)
class Unsendable:
pass
class TestCall(TargetMixin, unittest.TestCase):
def setUp(self):
TargetMixin.setUp(self)
self.setupBrokers()
def testCall1(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("add", a=1, b=2)
d.addCallback(lambda res: self.failUnlessEqual(res, 3))
d.addCallback(lambda res: self.failUnlessEqual(target.calls, [(1,2)]))
d.addCallback(self._testCall1_1, rr)
return d
testCall1.timeout = 3
def _testCall1_1(self, res, rr):
# the caller still holds the RemoteReference
self.failUnless(self.callingBroker.yourReferenceByCLID.has_key(1))
# release the RemoteReference. This does two things: 1) the
# callingBroker will forget about it. 2) they will send a decref to
# the targetBroker so *they* can forget about it.
del rr # this fires a DecRef
gc.collect() # make sure
# we need to give it a moment to deliver the DecRef message and act
# on it
d = defer.Deferred()
reactor.callLater(0.1, d.callback, None)
d.addCallback(self._testCall1_2)
return d
def _testCall1_2(self, res):
self.failIf(self.callingBroker.yourReferenceByCLID.has_key(1))
self.failIf(self.targetBroker.myReferenceByCLID.has_key(1))
def testFail1(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("fail")
self.failIf(target.calls)
d.addBoth(self._testFail1_1)
return d
testFail1.timeout = 2
def _testFail1_1(self, f):
# f should be a pb.CopiedFailure
self.failUnless(isinstance(f, failure.Failure),
"Hey, we didn't fail: %s" % f)
self.failUnless(f.check(ValueError),
"wrong exception type: %s" % f)
self.failUnlessSubstring("you asked me to fail", f.value)
def testFail2(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("add", a=1, b=2, c=3)
# add() does not take a 'c' argument, so we get a TypeError here
self.failIf(target.calls)
d.addBoth(self._testFail2_1)
return d
testFail2.timeout = 2
def _testFail2_1(self, f):
self.failUnless(isinstance(f, failure.Failure),
"Hey, we didn't fail: %s" % f)
self.failUnless(f.check(TypeError),
"wrong exception type: %s" % f.type)
self.failUnlessSubstring("remote_add() got an unexpected keyword "
"argument 'c'", f.value)
def testFail3(self):
# this is done without interfaces
rr, target = self.setupTarget(TargetWithoutInterfaces())
d = rr.callRemote("bogus", a=1, b=2)
# the target does not have .bogus method, so we get an AttributeError
self.failIf(target.calls)
d.addBoth(self._testFail3_1)
return d
testFail3.timeout = 2
def _testFail3_1(self, f):
self.failUnless(isinstance(f, failure.Failure),
"Hey, we didn't fail: %s" % f)
self.failUnless(f.check(AttributeError),
"wrong exception type: %s" % f.type)
self.failUnlessSubstring("TargetWithoutInterfaces", str(f))
self.failUnlessSubstring(" has no attribute 'remote_bogus'", str(f))
def testCall2(self):
# server end uses an interface this time, but not the client end
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=3, b=4, _useSchema=False)
# the schema is enforced upon receipt
d.addCallback(lambda res: self.failUnlessEqual(res, 7))
return d
testCall2.timeout = 2
def testCall3(self):
# use interface on both sides
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote('add', 3, 4) # enforces schemas
d.addCallback(lambda res: self.failUnlessEqual(res, 7))
return d
testCall3.timeout = 2
def testCall4(self):
# call through a manually-defined RemoteMethodSchema
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", 3, 4, _methodConstraint=RIMyTarget['add1'])
d.addCallback(lambda res: self.failUnlessEqual(res, 7))
return d
testCall4.timeout = 2
def testFailWrongMethodLocal(self):
# the caller knows that this method does not really exist
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("bogus") # RIMyTarget doesn't implement .bogus()
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongMethodLocal_1)
return d
testFailWrongMethodLocal.timeout = 2
def _testFailWrongMethodLocal_1(self, f):
self.failUnless(f.check(Violation))
self.failUnless(re.search(r'RIMyTarget\(.*\) does not offer bogus',
str(f)))
def testFailWrongMethodRemote(self):
# if the target doesn't specify any remote interfaces, then the
# calling side shouldn't try to do any checking. The problem is
# caught on the target side.
rr, target = self.setupTarget(Target(), False)
d = rr.callRemote("bogus") # RIMyTarget doesn't implement .bogus()
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongMethodRemote_1)
return d
testFailWrongMethodRemote.timeout = 2
def _testFailWrongMethodRemote_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("method 'bogus' not defined in RIMyTarget",
str(f))
def testFailWrongMethodRemote2(self):
# call a method which doesn't actually exist. The sender thinks
# they're ok but the recipient catches the violation
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("bogus", _useSchema=False)
# RIMyTarget2 has a 'sub' method, but RIMyTarget (the real interface)
# does not
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongMethodRemote2_1)
d.addCallback(lambda res: self.failIf(target.calls))
return d
testFailWrongMethodRemote2.timeout = 2
def _testFailWrongMethodRemote2_1(self, f):
self.failUnless(f.check(Violation))
self.failUnless(re.search(r'RIMyTarget\(.*\) does not offer bogus',
str(f)))
def testFailWrongArgsLocal1(self):
# we violate the interface (extra arg), and the sender should catch it
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b=2, c=3)
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongArgsLocal1_1)
d.addCallback(lambda res: self.failIf(target.calls))
return d
testFailWrongArgsLocal1.timeout = 2
def _testFailWrongArgsLocal1_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("unknown argument 'c'", str(f.value))
def testFailWrongArgsLocal2(self):
# we violate the interface (bad arg), and the sender should catch it
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b="two")
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongArgsLocal2_1)
d.addCallback(lambda res: self.failIf(target.calls))
return d
testFailWrongArgsLocal2.timeout = 2
def _testFailWrongArgsLocal2_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("not a number", str(f.value))
def testFailWrongArgsRemote1(self):
# the sender thinks they're ok but the recipient catches the
# violation
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b="foo", _useSchema=False)
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongArgsRemote1_1)
d.addCallbacks(lambda res: self.failIf(target.calls))
return d
testFailWrongArgsRemote1.timeout = 2
def _testFailWrongArgsRemote1_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("STRING token rejected by IntegerConstraint",
f.value)
self.failUnlessSubstring("at <RootUnslicer>.<methodcall .add arg[b]>",
f.value)
def testFailWrongReturnRemote(self):
rr, target = self.setupTarget(BrokenTarget(), True)
d = rr.callRemote("add", 3, 4) # violates return constraint
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongReturnRemote_1)
return d
testFailWrongReturnRemote.timeout = 2
def _testFailWrongReturnRemote_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("in outbound method results", f.value)
def testFailWrongReturnLocal(self):
# the target returns a value which violates our _resultConstraint
rr, target = self.setupTarget(Target(), True)
d = rr.callRemote("add", a=1, b=2, _resultConstraint=str)
# The target returns an int, which matches the schema they're using,
# so they think they're ok. We've overridden our expectations to
# require a string.
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testFailWrongReturnLocal_1)
# the method should have been run
d.addCallback(lambda res: self.failUnless(target.calls))
return d
testFailWrongReturnLocal.timeout = 2
def _testFailWrongReturnLocal_1(self, f):
self.failUnless(f.check(Violation))
self.failUnlessSubstring("INT token rejected by StringConstraint",
str(f))
self.failUnlessSubstring("in inbound method results", str(f))
self.failUnlessSubstring("at <RootUnslicer>.Answer(req=0)", str(f))
def testDefer(self):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("defer", obj=12)
d.addCallback(lambda res: self.failUnlessEqual(res, 12))
return d
testDefer.timeout = 2
def testDisconnect1(self):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("hang")
e = RuntimeError("lost connection")
rr.tracker.broker.transport.loseConnection(e)
d.addCallbacks(lambda res: self.fail("should have failed"),
lambda why: why.trap(RuntimeError) and None)
return d
testDisconnect1.timeout = 2
def disconnected(self):
self.lost = 1
def testDisconnect2(self):
rr, target = self.setupTarget(HelperTarget())
self.lost = 0
rr.notifyOnDisconnect(self.disconnected)
rr.tracker.broker.transport.loseConnection(CONNECTION_LOST)
d = eventually()
d.addCallback(lambda res: self.failUnless(self.lost))
return d
def testDisconnect3(self):
rr, target = self.setupTarget(HelperTarget())
self.lost = 0
rr.notifyOnDisconnect(self.disconnected)
rr.dontNotifyOnDisconnect(self.disconnected)
rr.tracker.broker.transport.loseConnection(CONNECTION_LOST)
d = eventually()
d.addCallback(lambda res: self.failIf(self.lost))
return d
def testUnsendable(self):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=Unsendable())
d.addCallbacks(lambda res: self.fail("should have failed"),
self._testUnsendable_1)
return d
testUnsendable.timeout = 2
def _testUnsendable_1(self, why):
self.failUnless(why.check(Violation))
self.failUnlessSubstring("cannot serialize", why.value.args[0])
class TestReferenceable(TargetMixin, unittest.TestCase):
# test how a Referenceable gets transformed into a RemoteReference as it
# crosses the wire, then verify that it gets transformed back into the
# original Referenceable when it comes back. Also test how shared
# references to the same object are handled.
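    # Sketch of the round trip exercised below: the calling side does
    # rr.callRemote("set", obj=Target()); the receiving HelperTarget ends up
    # holding a RemoteReference that points back at the caller's Target, and
    # echoing that reference back again yields the original Target instance
    # (see testRef6).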
def setUp(self):
TargetMixin.setUp(self)
self.setupBrokers()
if 0:
print
self.callingBroker.doLog = "TX"
self.targetBroker.doLog = " rx"
def send(self, arg):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=arg)
d.addCallback(self.failUnless)
d.addCallback(lambda res: target.obj)
return d
def send2(self, arg1, arg2):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set2", obj1=arg1, obj2=arg2)
d.addCallback(self.failUnless)
d.addCallback(lambda res: (target.obj1, target.obj2))
return d
def echo(self, arg):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("echo", obj=arg)
return d
def testRef1(self):
# Referenceables turn into RemoteReferences
r = Target()
d = self.send(r)
d.addCallback(self._testRef1_1, r)
return d
def _testRef1_1(self, res, r):
t = res.tracker
self.failUnless(isinstance(res, referenceable.RemoteReference))
self.failUnlessEqual(t.broker, self.targetBroker)
self.failUnless(type(t.clid) is int)
self.failUnless(self.callingBroker.getMyReferenceByCLID(t.clid) is r)
self.failUnlessEqual(t.interfaceName, 'RIMyTarget')
def testRef2(self):
# sending a Referenceable over the wire multiple times should result
# in equivalent RemoteReferences
r = Target()
d = self.send(r)
d.addCallback(self._testRef2_1, r)
return d
def _testRef2_1(self, res1, r):
d = self.send(r)
d.addCallback(self._testRef2_2, res1)
return d
def _testRef2_2(self, res2, res1):
self.failUnless(res1 == res2)
self.failUnless(res1 is res2) # newpb does this, oldpb didn't
def testRef3(self):
# sending the same Referenceable in multiple arguments should result
# in equivalent RRs
r = Target()
d = self.send2(r, r)
d.addCallback(self._testRef3_1)
return d
def _testRef3_1(self, (res1, res2)):
self.failUnless(res1 == res2)
self.failUnless(res1 is res2)
def testRef4(self):
# sending the same Referenceable in multiple calls will result in
# equivalent RRs
r = Target()
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=r)
d.addCallback(self._testRef4_1, rr, r, target)
return d
def _testRef4_1(self, res, rr, r, target):
res1 = target.obj
d = rr.callRemote("set", obj=r)
d.addCallback(self._testRef4_2, target, res1)
return d
def _testRef4_2(self, res, target, res1):
res2 = target.obj
self.failUnless(res1 == res2)
self.failUnless(res1 is res2)
def testRef5(self):
# those RemoteReferences can be used to invoke methods on the sender.
# 'r' lives on side A. The anonymous target lives on side B. From
# side A we invoke B.set(r), and we get the matching RemoteReference
# 'rr' which lives on side B. Then we use 'rr' to invoke r.getName
# from side A.
r = Target()
r.name = "ernie"
d = self.send(r)
d.addCallback(lambda rr: rr.callRemote("getName"))
d.addCallback(self.failUnlessEqual, "ernie")
return d
def testRef6(self):
# Referenceables survive round-trips
r = Target()
d = self.echo(r)
d.addCallback(self.failUnlessIdentical, r)
return d
def NOTtestRemoteRef1(self):
# known URLRemoteReferences turn into Referenceables
root = Target()
rr, target = self.setupTarget(HelperTarget())
self.targetBroker.factory = pb.PBServerFactory(root)
urlRRef = self.callingBroker.remoteReferenceForName("", [])
# urlRRef points at root
d = rr.callRemote("set", obj=urlRRef)
self.failUnless(dr(d))
self.failUnlessIdentical(target.obj, root)
def NOTtestRemoteRef2(self):
# unknown URLRemoteReferences are errors
root = Target()
rr, target = self.setupTarget(HelperTarget())
self.targetBroker.factory = pb.PBServerFactory(root)
urlRRef = self.callingBroker.remoteReferenceForName("bogus", [])
# urlRRef points at nothing
d = rr.callRemote("set", obj=urlRRef)
f = de(d)
#print f
#self.failUnlessEqual(f.type, tokens.Violation)
self.failUnlessEqual(type(f.value), str)
self.failUnless(f.value.find("unknown clid 'bogus'") != -1)
def testArgs1(self):
# sending the same non-Referenceable object in multiple calls results
# in distinct objects, because the serialization scope is bounded by
# each method call
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=r)
d.addCallback(self._testArgs1_1, rr, r, target)
# TODO: also make sure the original list goes out of scope once the
# method call has finished, to guard against a leaky
# reference-tracking implementation.
return d
def _testArgs1_1(self, res, rr, r, target):
res1 = target.obj
d = rr.callRemote("set", obj=r)
d.addCallback(self._testArgs1_2, target, res1)
return d
def _testArgs1_2(self, res, target, res1):
res2 = target.obj
self.failUnless(res1 == res2)
self.failIf(res1 is res2)
def testArgs2(self):
# but sending them as multiple arguments of the *same* method call
# results in identical objects
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set2", obj1=r, obj2=r)
d.addCallback(self._testArgs2_1, rr, target)
return d
def _testArgs2_1(self, res, rr, target):
self.failUnlessIdentical(target.obj1, target.obj2)
def testAnswer1(self):
# also, shared objects in a return value should be shared
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
target.obj = (r,r)
d = rr.callRemote("get")
d.addCallback(lambda res: self.failUnlessIdentical(res[0], res[1]))
return d
def testAnswer2(self):
# but objects returned by separate method calls should be distinct
rr, target = self.setupTarget(HelperTarget())
r = [1,2]
target.obj = r
d = rr.callRemote("get")
d.addCallback(self._testAnswer2_1, rr, target)
return d
def _testAnswer2_1(self, res1, rr, target):
d = rr.callRemote("get")
d.addCallback(self._testAnswer2_2, res1)
return d
def _testAnswer2_2(self, res2, res1):
self.failUnless(res1 == res2)
self.failIf(res1 is res2)
class TestFactory(unittest.TestCase):
def setUp(self):
self.client = None
self.server = None
def gotReference(self, ref):
self.client = ref
def tearDown(self):
if self.client:
self.client.broker.transport.loseConnection()
if self.server:
return self.server.stopListening()
class TestCallable(unittest.TestCase):
def setUp(self):
self.services = [pb.PBService(), pb.PBService()]
self.tubA, self.tubB = self.services
for s in self.services:
s.startService()
l = s.listenOn("tcp:0:interface=127.0.0.1")
s.setLocation("localhost:%d" % l.getPortnum())
def tearDown(self):
return defer.DeferredList([s.stopService() for s in self.services])
def testBoundMethod(self):
target = Target()
meth_url = self.tubB.registerReference(target.remote_add)
d = self.tubA.getReference(meth_url)
d.addCallback(self._testBoundMethod_1)
return d
testBoundMethod.timeout = 5
def _testBoundMethod_1(self, ref):
self.failUnless(isinstance(ref, referenceable.RemoteMethodReference))
#self.failUnlessEqual(ref.getSchemaName(),
# RIMyTarget.__remote_name__ + "/remote_add")
d = ref.callRemote(a=1, b=2)
d.addCallback(lambda res: self.failUnlessEqual(res, 3))
return d
def testFunction(self):
l = []
# we need a keyword arg here
def append(what):
l.append(what)
func_url = self.tubB.registerReference(append)
d = self.tubA.getReference(func_url)
d.addCallback(self._testFunction_1, l)
return d
testFunction.timeout = 5
def _testFunction_1(self, ref, l):
self.failUnless(isinstance(ref, referenceable.RemoteMethodReference))
d = ref.callRemote(what=12)
d.addCallback(lambda res: self.failUnlessEqual(l, [12]))
return d
class TestService(unittest.TestCase):
def setUp(self):
self.services = [pb.PBService()]
self.services[0].startService()
def tearDown(self):
return defer.DeferredList([s.stopService() for s in self.services])
def testRegister(self):
s = self.services[0]
l = s.listenOn("tcp:0:interface=127.0.0.1")
s.setLocation("localhost:%d" % l.getPortnum())
t1 = Target()
public_url = s.registerReference(t1, "target")
if crypto:
self.failUnless(public_url.startswith("pb://"))
self.failUnless(public_url.endswith("@localhost:%d/target"
% l.getPortnum()))
else:
self.failUnlessEqual(public_url,
"pbu://localhost:%d/target"
% l.getPortnum())
self.failUnlessEqual(s.registerReference(t1, "target"), public_url)
self.failUnlessIdentical(s.getReferenceForURL(public_url), t1)
t2 = Target()
private_url = s.registerReference(t2)
self.failUnlessEqual(s.registerReference(t2), private_url)
self.failUnlessIdentical(s.getReferenceForURL(private_url), t2)
s.unregisterURL(public_url)
self.failUnlessRaises(KeyError, s.getReferenceForURL, public_url)
s.unregisterReference(t2)
self.failUnlessRaises(KeyError, s.getReferenceForURL, private_url)
# TODO: check what happens when you register the same referenceable
# under multiple URLs
def getRef(self, target):
self.services.append(pb.PBService())
s1 = self.services[0]
s2 = self.services[1]
s2.startService()
l = s1.listenOn("tcp:0:interface=127.0.0.1")
s1.setLocation("localhost:%d" % l.getPortnum())
public_url = s1.registerReference(target, "target")
d = s2.getReference(public_url)
return d
def testConnect1(self):
t1 = TargetWithoutInterfaces()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect1, t1)
return d
testConnect1.timeout = 5
def _testConnect1(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testConnect2(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect2, t1)
return d
testConnect2.timeout = 5
def _testConnect2(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testConnect3(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect3, t1)
return d
testConnect3.timeout = 5
def _testConnect3(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testStatic(self):
# make sure we can register static data too, at least hashable ones
t1 = (1,2,3)
d = self.getRef(t1)
d.addCallback(lambda ref: self.failUnlessEqual(ref, (1,2,3)))
return d
testStatic.timeout = 2
def testBadMethod(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('missing', a=2, b=3))
d.addCallbacks(self._testBadMethod_cb, self._testBadMethod_eb)
return d
testBadMethod.timeout = 5
def _testBadMethod_cb(self, res):
self.fail("method wasn't supposed to work")
def _testBadMethod_eb(self, f):
#self.failUnlessEqual(f.type, 'twisted.pb.tokens.Violation')
self.failUnlessEqual(f.type, Violation)
self.failUnless(re.search(r'RIMyTarget\(.*\) does not offer missing',
str(f)))
def testBadMethod2(self):
t1 = TargetWithoutInterfaces()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('missing', a=2, b=3))
d.addCallbacks(self._testBadMethod_cb, self._testBadMethod2_eb)
return d
testBadMethod2.timeout = 5
def _testBadMethod2_eb(self, f):
self.failUnlessEqual(f.type, 'exceptions.AttributeError')
self.failUnlessSubstring("TargetWithoutInterfaces", f.value)
self.failUnlessSubstring(" has no attribute 'remote_missing'", f.value)
class ThreeWayHelper:
passed = False
def start(self):
d = pb.getRemoteURL_TCP("localhost", self.portnum1, "", RIHelper)
d.addCallback(self.step2)
d.addErrback(self.err)
return d
def step2(self, remote1):
# .remote1 is our RRef to server1's "t1" HelperTarget
self.clients.append(remote1)
self.remote1 = remote1
d = pb.getRemoteURL_TCP("localhost", self.portnum2, "", RIHelper)
d.addCallback(self.step3)
return d
def step3(self, remote2):
# and .remote2 is our RRef to server2's "t2" helper target
self.clients.append(remote2)
self.remote2 = remote2
# sending a RemoteReference back to its source should be ok
d = self.remote1.callRemote("set", obj=self.remote1)
d.addCallback(self.step4)
return d
def step4(self, res):
assert self.target1.obj is self.target1
# but sending one to someone else is not
d = self.remote2.callRemote("set", obj=self.remote1)
d.addCallback(self.step5_callback)
d.addErrback(self.step5_errback)
return d
def step5_callback(self, res):
why = unittest.FailTest("sending a 3rd-party reference did not fail")
self.err(failure.Failure(why))
return None
def step5_errback(self, why):
bad = None
if why.type != tokens.Violation:
bad = "%s failure should be a Violation" % why.type
elif why.value.args[0].find("RemoteReferences can only be sent back to their home Broker") == -1:
bad = "wrong error message: '%s'" % why.value.args[0]
if bad:
why = unittest.FailTest(bad)
self.passed = failure.Failure(why)
else:
self.passed = True
def err(self, why):
self.passed = why
class Test3Way(unittest.TestCase):
# Here we test the three-party introduction process as depicted in the
# classic Granovetter diagram. Alice has a reference to Bob and another
# one to Carol. Alice wants to give her Carol-reference to Bob, by
# including it as the argument to a method she invokes on her
# Bob-reference.
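    # In code terms the gift is simply (sketch): abob.callRemote("set",
    # obj=acarol), where abob and acarol are Alice's RemoteReferences to Bob
    # and Carol; this is exactly what testGift below exercises.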
def setUp(self):
self.services = [pb.PBService(), pb.PBService(), pb.PBService()]
self.tubA, self.tubB, self.tubC = self.services
for s in self.services:
s.startService()
l = s.listenOn("tcp:0:interface=127.0.0.1")
s.setLocation("localhost:%d" % l.getPortnum())
def tearDown(self):
return defer.DeferredList([s.stopService() for s in self.services])
def testGift(self):
# we must start by giving Alice a reference to both Bob and Carol.
self.bob = HelperTarget("bob")
self.bob_url = self.tubB.registerReference(self.bob)
self.carol = HelperTarget("carol")
self.carol_url = self.tubC.registerReference(self.carol)
# now, from Alice's point of view:
d = self.tubA.getReference(self.bob_url)
d.addCallback(self._aliceGotBob)
return d
testGift.timeout = 2
def _aliceGotBob(self, abob):
self.abob = abob # Alice's reference to Bob
d = self.tubA.getReference(self.carol_url)
d.addCallback(self._aliceGotCarol)
return d
def _aliceGotCarol(self, acarol):
self.acarol = acarol # Alice's reference to Carol
d2 = self.bob.waitfor()
d = self.abob.callRemote("set", obj=self.acarol) # send the gift
# TODO: at this point, 'del self.acarol' should not lose alice's
# reference to carol, because it will still be in the gift table. The
# trick is how to test that, we would need a way to stall the gift
# delivery while we verify everything
d.addCallback(lambda res: d2)
d.addCallback(self._bobGotCarol)
return d
def _bobGotCarol(self, bcarol):
# Bob has received the gift
self.bcarol = bcarol
# alice's gift table should be empty
brokerAB = self.abob.tracker.broker
self.failIf(brokerAB.myGifts)
self.failIf(brokerAB.myGiftsByGiftID)
d2 = self.carol.waitfor()
d = self.bcarol.callRemote("set", obj=12)
d.addCallback(lambda res: d2)
d.addCallback(self._carolCalled)
return d
def _carolCalled(self, res):
self.failUnlessEqual(res, 12)
# TODO:
# when the Violation is remote, it is reported in a CopiedFailure, which
# means f.type is a string. When it is local, it is reported in a Failure,
# and f.type is the tokens.Violation class. I'm not sure how I feel about
# these being different.
# TODO: tests to port from oldpb suite
# testTooManyRefs: sending pb.MAX_BROKER_REFS across the wire should die
# testFactoryCopy?
# tests which aren't relevant right now but which might be once we port the
# corresponding functionality:
#
# testObserve, testCache (pb.Cacheable)
# testViewPoint
# testPublishable (spread.publish??)
# SpreadUtilTestCase (spread.util)
# NewCredTestCase
# tests which aren't relevant and aren't likely to ever be
#
# PagingTestCase
# ConnectionTestCase (oldcred)
# NSPTestCase
| gpl-2.0 |
vimeworks/ImpaQto | coworkersimpaqto/models.py | 1 | 4211 | from django.db import models
from django.template.defaultfilters import default
# Create your models here.
class Membresia(models.Model):
MODALIDAD_CHOICES=(
#('D','Diario'),
('M','Mensual'),
#('S','Semestral'),
#('A','Anual'),
)
STATE_CHOICES=(
('A','Activo'),
('I','Inactivo'),
)
nombre = models.TextField("Nombre de la membresía")
uso_espacio = models.IntegerField("Uso de Espacio")
modalidad = models.CharField("Modalidad de la membresía",max_length=1,choices=MODALIDAD_CHOICES)
estado = models.CharField("Estado de la membresía",max_length=1,choices=STATE_CHOICES)
def __str__(self):
return self.nombre
def __unicode__(self):
return self.nombre
class Coworker(models.Model):
nombre = models.CharField("Nombre del Coworker",max_length=250)
apellido = models.CharField("Apellido del Coworker",max_length=250)
mail= models.EmailField("Correo Electrónico del Coworker",unique=True,null=False,blank=True)
username = models.CharField("Usuario",max_length=16,null=False,blank=True)
def __str__(self):
return '%s %s'%(self.nombre,self.apellido)
def mail_default(self):
return {"mail":"[email protected]"}
class Meta:
ordering = ["apellido"]
verbose_name_plural="Coworker's"
class Contrato(models.Model):
ACTIVO='A'
INACTIVO='I'
ESTADO_CHOICES=(
(ACTIVO,'Activo'),
(INACTIVO,'Inactivo'),
)
coworker = models.ForeignKey(Coworker,verbose_name="Nombre del Coworkers")
membresia = models.ForeignKey(Membresia,verbose_name="Nombre de la membresía")
fecha_inicio = models.DateField()
fecha_fin = models.DateField(null=True,blank=True)
estado = models.CharField("Estado del contrato",max_length=1,choices=ESTADO_CHOICES,default=ACTIVO)
minutos_mes = models.DecimalField(decimal_places=2,max_digits=10,null=True,blank=True)
def __str__(self):
return '%s %s'%(self.coworker,self.membresia)
class Meta:
order_with_respect_to = 'coworker'
verbose_name_plural="Planes - Coworker's"
class ControlConsumo(models.Model):
mes = models.IntegerField()
anio = models.IntegerField("Año")
control_minutos = models.DecimalField(decimal_places=2,max_digits=10,null=True,blank=True)
contrato = models.ForeignKey(Contrato,verbose_name="Contrato a elegir")
def __str__(self):
        return 'En %s del %s' % (self.mes, self.anio)
class Meta:
ordering = ["anio"]
verbose_name_plural = "Resumen del Consumo"
class ManejadorConsumo(models.Manager):
def resumen_dias(self,mes,anio):
from django.db import connection
cursor = connection.cursor()
cursor.execute("""
SELECT date_part('day',c.fecha_entrada) as day, SUM(c.minutos) as minutos
FROM coworkersimpaqto_consumo c
WHERE date_part('month', c.fecha_entrada) = %s
AND date_part('year', c.fecha_entrada) = %s
GROUP BY day
ORDER BY day""",[mes,anio])
        lista_resultados = []
        for row in cursor.fetchall():
            p = self.model(minutos=row[1])
p.dia = row[0]
p.resumen_minutos = row[1]
lista_resultados.append(p)
return lista_resultados
class Consumo(models.Model):
ENTRADA ='E'
SALIDA = 'S'
REGISTRO_CHOICES=(
(ENTRADA,'Entrada'),
(SALIDA,'Salida'),
)
estado_registro = models.CharField("Registro de ",max_length=1,choices = REGISTRO_CHOICES,default=ENTRADA)
fecha_entrada = models.DateTimeField(auto_now_add=True,null=True,blank=True)
fecha_salida = models.DateTimeField(null=True,blank=True)
minutos = models.DecimalField(decimal_places=2,max_digits=10,null=True,blank=True)
control_consumo = models.ForeignKey(ControlConsumo,verbose_name="Control Consumo",null=False,blank=False)
objects = models.Manager()
reporte = ManejadorConsumo()
def __str__(self):
return '%s '%(self.control_consumo)
class Meta:
ordering = ["fecha_entrada"]
verbose_name_plural = "Asistencia"
| mit |
cloudbase/nova | nova/tests/unit/api_samples_test_base/test_compare_result.py | 10 | 16266 | # Copyright 2015 HPE, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import testtools
from nova import test
from nova.tests.functional import api_samples_test_base
class TestCompareResult(test.NoDBTestCase):
"""Provide test coverage for result comparison logic in functional tests.
    _compare_result handles two types of comparisons: template data and
    sample data.
Template data means the response is checked against a regex that is
referenced by the template name. The template name is specified in
the format %(name)
Sample data is a normal value comparison.
"""
def getApiSampleTestBaseHelper(self):
"""Build an instance without running any unwanted test methods"""
# NOTE(auggy): TestCase takes a "test" method name to run in __init__
# calling this way prevents additional test methods from running
ast_instance = api_samples_test_base.ApiSampleTestBase('setUp')
# required by ApiSampleTestBase
ast_instance.api_major_version = 'v2'
ast_instance._project_id = 'True'
# automagically create magic methods usually handled by test classes
ast_instance.compute = mock.MagicMock()
ast_instance.subs = ast_instance._get_regexes()
return ast_instance
def setUp(self):
super(TestCompareResult, self).setUp()
self.ast = self.getApiSampleTestBaseHelper()
def test_bare_strings_match(self):
"""compare 2 bare strings that match"""
sample_data = u'foo'
response_data = u'foo'
result = self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
# NOTE(auggy): _compare_result will not return a matched value in the
# case of bare strings. If they don't match it will throw an exception,
# otherwise it returns "None".
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of 2 bare strings')
def test_bare_strings_no_match(self):
"""check 2 bare strings that don't match"""
sample_data = u'foo'
response_data = u'bar'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_template_strings_match(self):
"""compare 2 template strings (contain %) that match"""
template_data = u'%(id)s'
response_data = u'858f295a-8543-45fa-804a-08f8356d616d'
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=response_data,
observed=result,
message='Check _compare_result of 2 template strings')
def test_template_strings_no_match(self):
"""check 2 template strings (contain %) that don't match"""
template_data = u'%(id)s'
response_data = u'$58f295a-8543-45fa-804a-08f8356d616d'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
# TODO(auggy): _compare_result needs a consistent return value
# In some cases it returns the value if it matched, in others it returns
# None. In all cases, it throws an exception if there's no match.
def test_bare_int_match(self):
"""check 2 bare ints that match"""
sample_data = 42
response_data = 42
result = self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of 2 bare ints')
def test_bare_int_no_match(self):
"""check 2 bare ints that don't match"""
sample_data = 42
response_data = 43
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
# TODO(auggy): _compare_result needs a consistent return value
def test_template_int_match(self):
"""check template int against string containing digits"""
template_data = u'%(int)s'
response_data = u'42'
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of template ints')
def test_template_int_no_match(self):
"""check template int against a string containing no digits"""
template_data = u'%(int)s'
response_data = u'foo'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_template_int_value(self):
"""check an int value of a template int throws exception"""
# template_data = u'%(int_test)'
# response_data = 42
# use an int instead of a string as the subs value
local_subs = copy.deepcopy(self.ast.subs)
local_subs.update({'int_test': 42})
with testtools.ExpectedException(TypeError):
self.ast.subs = local_subs
# TODO(auggy): _compare_result needs a consistent return value
def test_dict_match(self):
"""check 2 matching dictionaries"""
template_data = {
u'server': {
u'id': u'%(id)s',
u'adminPass': u'%(password)s'
}
}
response_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'4ZQ3bb6WYbC2'}
}
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=u'858f295a-8543-45fa-804a-08f8356d616d',
observed=result,
message='Check _compare_result of 2 dictionaries')
def test_dict_no_match_value(self):
"""check 2 dictionaries where one has a different value"""
sample_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'foo'
}
}
response_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'4ZQ3bb6WYbC2'}
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_dict_no_match_extra_key(self):
"""check 2 dictionaries where one has an extra key"""
template_data = {
u'server': {
u'id': u'%(id)s',
u'adminPass': u'%(password)s',
u'foo': u'foo'
}
}
response_data = {
u'server': {
u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
u'adminPass': u'4ZQ3bb6WYbC2'}
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_dict_result_type_mismatch(self):
"""check expected is a dictionary and result is not a dictionary"""
template_data = {
u'server': {
u'id': u'%(id)s',
u'adminPass': u'%(password)s',
}
}
response_data = u'foo'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
# TODO(auggy): _compare_result needs a consistent return value
def test_list_match(self):
"""check 2 matching lists"""
template_data = {
u'links':
[
{
u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
u'rel': u'self'
},
{
u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
u'rel': u'bookmark'
}
]
}
response_data = {
u'links':
[
{
u'href':
(u'http://openstack.example.com/v2/%s/server/'
'858f295a-8543-45fa-804a-08f8356d616d' %
api_samples_test_base.PROJECT_ID
),
u'rel': u'self'
},
{
u'href':
(u'http://openstack.example.com/%s/servers/'
'858f295a-8543-45fa-804a-08f8356d616d' %
api_samples_test_base.PROJECT_ID
),
u'rel': u'bookmark'
}
]
}
result = self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of 2 lists')
def test_list_match_extra_item_result(self):
"""check extra list items in result """
template_data = {
u'links':
[
{
u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
u'rel': u'self'
},
{
u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
u'rel': u'bookmark'
}
]
}
response_data = {
u'links':
[
{
u'href':
(u'http://openstack.example.com/v2/openstack/server/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'self'
},
{
u'href':
(u'http://openstack.example.com/openstack/servers/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'bookmark'
},
u'foo'
]
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_list_match_extra_item_template(self):
"""check extra list items in template """
template_data = {
u'links':
[
{
u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
u'rel': u'self'
},
{
u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
u'rel': u'bookmark'
},
u'foo' # extra field
]
}
response_data = {
u'links':
[
{
u'href':
(u'http://openstack.example.com/v2/openstack/server/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'self'
},
{
u'href':
(u'http://openstack.example.com/openstack/servers/'
'858f295a-8543-45fa-804a-08f8356d616d'),
u'rel': u'bookmark'
}
]
}
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_list_no_match(self):
"""check 2 matching lists"""
template_data = {
u'things':
[
{
u'foo': u'bar',
u'baz': 0
},
{
u'foo': u'zod',
u'baz': 1
}
]
}
response_data = {
u'things':
[
{
u'foo': u'bar',
u'baz': u'0'
},
{
u'foo': u'zod',
u'baz': 1
}
]
}
# TODO(auggy): This error returns "extra list items"
# it should show the item/s in the list that didn't match
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
def test_none_match(self):
"""check that None matches"""
sample_data = None
response_data = None
result = self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
# NOTE(auggy): _compare_result will not return a matched value in the
# case of bare strings. If they don't match it will throw an exception,
# otherwise it returns "None".
self.assertEqual(
expected=None,
observed=result,
message='Check _compare_result of None')
def test_none_no_match(self):
"""check expected none and non-None response don't match"""
sample_data = None
response_data = u'bar'
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_none_result_no_match(self):
"""check result none and expected non-None response don't match"""
sample_data = u'foo'
response_data = None
with testtools.ExpectedException(api_samples_test_base.NoMatch):
self.ast._compare_result(
expected=sample_data,
result=response_data,
result_str="Test")
def test_template_no_subs_key(self):
"""check an int value of a template int throws exception"""
template_data = u'%(foo)'
response_data = 'bar'
with testtools.ExpectedException(KeyError):
self.ast._compare_result(
expected=template_data,
result=response_data,
result_str="Test")
| apache-2.0 |
akiss77/servo | etc/ci/performance/test_differ.py | 77 | 1744 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import json
parser = argparse.ArgumentParser(description="Diff between two runs of performance tests.")
parser.add_argument("file1", help="the first output json from runner")
parser.add_argument("file2", help="the second output json from runner")
args = parser.parse_args()
def load_data(filename):
with open(filename, 'r') as f:
results = {}
totals = {}
counts = {}
records = json.load(f)
for record in records:
key = record.get('testcase')
value = record.get('domComplete') - record.get('domLoading')
            totals[key] = totals.get(key, 0) + value
            counts[key] = counts.get(key, 0) + 1
results[key] = round(totals[key] / counts[key])
return results
data1 = load_data(args.file1)
data2 = load_data(args.file2)
keys = set(data1.keys()).union(data2.keys())
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
END = '\033[0m'
for key in keys:
value1 = data1.get(key)
value2 = data2.get(key)
if value1 and not(value2):
print ("{}Test {}: missing from {}.{}".format(WARNING, key, args.file2, END))
elif value2 and not(value1):
print ("{}Test {}: missing from {}.{}".format(WARNING, key, args.file1, END))
elif value1 and value2:
diff = value2 - value1
change = diff / value1
color = BLUE if value1 <= value2 else GREEN
print("{}{:6} {:6} {:+6} {:+8.2%} {}.{}".format(color, value1, value2, diff, change, key, END))
| mpl-2.0 |
legendtang/mitmproxy | libmproxy/main.py | 11 | 3548 | from __future__ import print_function, absolute_import
import os
import signal
import sys
import netlib.version
import netlib.version_check
from . import version, cmdline
from .proxy import process_proxy_options, ProxyServerError
from .proxy.server import DummyServer, ProxyServer
def assert_utf8_env():
spec = ""
for i in ["LANG", "LC_CTYPE", "LC_ALL"]:
spec += os.environ.get(i, "").lower()
if "utf" not in spec:
print(
"Error: mitmproxy requires a UTF console environment.",
file=sys.stderr
)
print(
"Set your LANG enviroment variable to something like en_US.UTF-8",
file=sys.stderr
)
sys.exit(1)
def get_server(dummy_server, options):
if dummy_server:
return DummyServer(options)
else:
try:
return ProxyServer(options)
except ProxyServerError as v:
print(str(v), file=sys.stderr)
sys.exit(1)
def mitmproxy(args=None): # pragma: nocover
from . import console
netlib.version_check.version_check(version.IVERSION)
assert_utf8_env()
parser = cmdline.mitmproxy()
options = parser.parse_args(args)
if options.quiet:
options.verbose = 0
proxy_config = process_proxy_options(parser, options)
console_options = console.Options(**cmdline.get_common_options(options))
console_options.palette = options.palette
console_options.palette_transparent = options.palette_transparent
console_options.eventlog = options.eventlog
console_options.intercept = options.intercept
console_options.limit = options.limit
server = get_server(console_options.no_server, proxy_config)
m = console.ConsoleMaster(server, console_options)
try:
m.run()
except KeyboardInterrupt:
pass
def mitmdump(args=None): # pragma: nocover
from . import dump
netlib.version_check.version_check(version.IVERSION)
parser = cmdline.mitmdump()
options = parser.parse_args(args)
if options.quiet:
options.verbose = 0
options.flow_detail = 0
proxy_config = process_proxy_options(parser, options)
dump_options = dump.Options(**cmdline.get_common_options(options))
dump_options.flow_detail = options.flow_detail
dump_options.keepserving = options.keepserving
dump_options.filtstr = " ".join(options.args) if options.args else None
server = get_server(dump_options.no_server, proxy_config)
try:
master = dump.DumpMaster(server, dump_options)
def cleankill(*args, **kwargs):
master.shutdown()
signal.signal(signal.SIGTERM, cleankill)
master.run()
except dump.DumpError as e:
print("mitmdump: %s" % e, file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
def mitmweb(args=None): # pragma: nocover
from . import web
netlib.version_check.version_check(version.IVERSION)
parser = cmdline.mitmweb()
options = parser.parse_args(args)
if options.quiet:
options.verbose = 0
proxy_config = process_proxy_options(parser, options)
web_options = web.Options(**cmdline.get_common_options(options))
web_options.intercept = options.intercept
web_options.wdebug = options.wdebug
web_options.wiface = options.wiface
web_options.wport = options.wport
server = get_server(web_options.no_server, proxy_config)
m = web.WebMaster(server, web_options)
try:
m.run()
except KeyboardInterrupt:
pass
| mit |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/rest_framework/serializers.py | 3 | 60354 | """
Serializers and ModelSerializers are similar to Forms and ModelForms.
Unlike forms, they are not constrained to dealing with HTML output, and
form encoded input.
Serialization in REST framework is a two-phase process:
1. Serializers marshal between complex types like model instances, and
python primitives.
2. The process of marshalling between python primitives and request and
response content is handled by parsers and renderers.
"""
from __future__ import unicode_literals
import copy
import inspect
import traceback
from collections import OrderedDict
from django.core.exceptions import ValidationError as DjangoValidationError
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models import DurationField as ModelDurationField
from django.db.models.fields import Field as DjangoModelField
from django.db.models.fields import FieldDoesNotExist
from django.utils import six, timezone
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import JSONField as ModelJSONField
from rest_framework.compat import postgres_fields, set_many, unicode_to_repr
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.fields import get_error_detail, set_value
from rest_framework.settings import api_settings
from rest_framework.utils import html, model_meta, representation
from rest_framework.utils.field_mapping import (
ClassLookupDict, get_field_kwargs, get_nested_relation_kwargs,
get_relation_kwargs, get_url_kwargs
)
from rest_framework.utils.serializer_helpers import (
BindingDict, BoundField, NestedBoundField, ReturnDict, ReturnList
)
from rest_framework.validators import (
UniqueForDateValidator, UniqueForMonthValidator, UniqueForYearValidator,
UniqueTogetherValidator
)
# Note: We do the following so that users of the framework can use this style:
#
# example_field = serializers.CharField(...)
#
# This helps keep the separation between model fields, form fields, and
# serializer fields more explicit.
from rest_framework.fields import ( # NOQA # isort:skip
BooleanField, CharField, ChoiceField, DateField, DateTimeField, DecimalField,
DictField, DurationField, EmailField, Field, FileField, FilePathField, FloatField,
HiddenField, IPAddressField, ImageField, IntegerField, JSONField, ListField,
ModelField, MultipleChoiceField, NullBooleanField, ReadOnlyField, RegexField,
SerializerMethodField, SlugField, TimeField, URLField, UUIDField,
)
from rest_framework.relations import ( # NOQA # isort:skip
HyperlinkedIdentityField, HyperlinkedRelatedField, ManyRelatedField,
PrimaryKeyRelatedField, RelatedField, SlugRelatedField, StringRelatedField,
)
# Non-field imports, but public API
from rest_framework.fields import ( # NOQA # isort:skip
CreateOnlyDefault, CurrentUserDefault, SkipField, empty
)
from rest_framework.relations import Hyperlink, PKOnlyObject # NOQA # isort:skip
# We assume that 'validators' are intended for the child serializer,
# rather than the parent serializer.
LIST_SERIALIZER_KWARGS = (
'read_only', 'write_only', 'required', 'default', 'initial', 'source',
'label', 'help_text', 'style', 'error_messages', 'allow_empty',
'instance', 'data', 'partial', 'context', 'allow_null'
)
ALL_FIELDS = '__all__'
# BaseSerializer
# --------------
class BaseSerializer(Field):
"""
The BaseSerializer class provides a minimal class which may be used
for writing custom serializer implementations.
Note that we strongly restrict the ordering of operations/properties
that may be used on the serializer in order to enforce correct usage.
In particular, if a `data=` argument is passed then:
.is_valid() - Available.
.initial_data - Available.
.validated_data - Only available after calling `is_valid()`
.errors - Only available after calling `is_valid()`
.data - Only available after calling `is_valid()`
If a `data=` argument is not passed then:
.is_valid() - Not available.
.initial_data - Not available.
.validated_data - Not available.
.errors - Not available.
.data - Available.
"""
def __init__(self, instance=None, data=empty, **kwargs):
self.instance = instance
if data is not empty:
self.initial_data = data
self.partial = kwargs.pop('partial', False)
self._context = kwargs.pop('context', {})
kwargs.pop('many', None)
super(BaseSerializer, self).__init__(**kwargs)
def __new__(cls, *args, **kwargs):
# We override this method in order to automagically create
# `ListSerializer` classes instead when `many=True` is set.
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super(BaseSerializer, cls).__new__(cls, *args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
"""
This method implements the creation of a `ListSerializer` parent
class when `many=True` is used. You can customize it if you need to
control which keyword arguments are passed to the parent, and
which are passed to the child.
Note that we're over-cautious in passing most arguments to both parent
and child classes in order to try to cover the general case. If you're
overriding this method you'll probably want something much simpler, eg:
@classmethod
def many_init(cls, *args, **kwargs):
kwargs['child'] = cls()
return CustomListSerializer(*args, **kwargs)
"""
allow_empty = kwargs.pop('allow_empty', None)
child_serializer = cls(*args, **kwargs)
list_kwargs = {
'child': child_serializer,
}
if allow_empty is not None:
list_kwargs['allow_empty'] = allow_empty
list_kwargs.update({
key: value for key, value in kwargs.items()
if key in LIST_SERIALIZER_KWARGS
})
meta = getattr(cls, 'Meta', None)
list_serializer_class = getattr(meta, 'list_serializer_class', ListSerializer)
return list_serializer_class(*args, **list_kwargs)
def to_internal_value(self, data):
raise NotImplementedError('`to_internal_value()` must be implemented.')
def to_representation(self, instance):
raise NotImplementedError('`to_representation()` must be implemented.')
def update(self, instance, validated_data):
raise NotImplementedError('`update()` must be implemented.')
def create(self, validated_data):
raise NotImplementedError('`create()` must be implemented.')
def save(self, **kwargs):
assert not hasattr(self, 'save_object'), (
'Serializer `%s.%s` has old-style version 2 `.save_object()` '
'that is no longer compatible with REST framework 3. '
'Use the new-style `.create()` and `.update()` methods instead.' %
(self.__class__.__module__, self.__class__.__name__)
)
assert hasattr(self, '_errors'), (
'You must call `.is_valid()` before calling `.save()`.'
)
assert not self.errors, (
'You cannot call `.save()` on a serializer with invalid data.'
)
# Guard against incorrect use of `serializer.save(commit=False)`
assert 'commit' not in kwargs, (
"'commit' is not a valid keyword argument to the 'save()' method. "
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
"You can also pass additional keyword arguments to 'save()' if you "
"need to set extra attributes on the saved model instance. "
"For example: 'serializer.save(owner=request.user)'.'"
)
assert not hasattr(self, '_data'), (
"You cannot call `.save()` after accessing `serializer.data`."
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
)
validated_data = dict(
list(self.validated_data.items()) +
list(kwargs.items())
)
if self.instance is not None:
self.instance = self.update(self.instance, validated_data)
assert self.instance is not None, (
'`update()` did not return an object instance.'
)
else:
self.instance = self.create(validated_data)
assert self.instance is not None, (
'`create()` did not return an object instance.'
)
return self.instance
def is_valid(self, raise_exception=False):
assert not hasattr(self, 'restore_object'), (
'Serializer `%s.%s` has old-style version 2 `.restore_object()` '
'that is no longer compatible with REST framework 3. '
'Use the new-style `.create()` and `.update()` methods instead.' %
(self.__class__.__module__, self.__class__.__name__)
)
assert hasattr(self, 'initial_data'), (
'Cannot call `.is_valid()` as no `data=` keyword argument was '
'passed when instantiating the serializer instance.'
)
if not hasattr(self, '_validated_data'):
try:
self._validated_data = self.run_validation(self.initial_data)
except ValidationError as exc:
self._validated_data = {}
self._errors = exc.detail
else:
self._errors = {}
if self._errors and raise_exception:
raise ValidationError(self.errors)
return not bool(self._errors)
@property
def data(self):
if hasattr(self, 'initial_data') and not hasattr(self, '_validated_data'):
msg = (
'When a serializer is passed a `data` keyword argument you '
'must call `.is_valid()` before attempting to access the '
'serialized `.data` representation.\n'
'You should either call `.is_valid()` first, '
'or access `.initial_data` instead.'
)
raise AssertionError(msg)
if not hasattr(self, '_data'):
if self.instance is not None and not getattr(self, '_errors', None):
self._data = self.to_representation(self.instance)
elif hasattr(self, '_validated_data') and not getattr(self, '_errors', None):
self._data = self.to_representation(self.validated_data)
else:
self._data = self.get_initial()
return self._data
@property
def errors(self):
if not hasattr(self, '_errors'):
msg = 'You must call `.is_valid()` before accessing `.errors`.'
raise AssertionError(msg)
return self._errors
@property
def validated_data(self):
if not hasattr(self, '_validated_data'):
msg = 'You must call `.is_valid()` before accessing `.validated_data`.'
raise AssertionError(msg)
return self._validated_data
# Serializer & ListSerializer classes
# -----------------------------------
class SerializerMetaclass(type):
"""
This metaclass sets a dictionary named `_declared_fields` on the class.
Any instances of `Field` included as attributes on either the class
    or on any of its superclasses will be included in the
`_declared_fields` dictionary.
"""
@classmethod
def _get_declared_fields(cls, bases, attrs):
fields = [(field_name, attrs.pop(field_name))
for field_name, obj in list(attrs.items())
if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1]._creation_counter)
# If this class is subclassing another Serializer, add that Serializer's
# fields. Note that we loop over the bases in *reverse*. This is necessary
# in order to maintain the correct order of fields.
for base in reversed(bases):
if hasattr(base, '_declared_fields'):
fields = list(base._declared_fields.items()) + fields
return OrderedDict(fields)
def __new__(cls, name, bases, attrs):
attrs['_declared_fields'] = cls._get_declared_fields(bases, attrs)
return super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)
def as_serializer_error(exc):
assert isinstance(exc, (ValidationError, DjangoValidationError))
if isinstance(exc, DjangoValidationError):
detail = get_error_detail(exc)
else:
detail = exc.detail
if isinstance(detail, dict):
        # Errors raised as a dict use the standard {key: list of values} format.
# Here we ensure that all the values are *lists* of errors.
return {
key: value if isinstance(value, (list, dict)) else [value]
for key, value in detail.items()
}
elif isinstance(detail, list):
# Errors raised as a list are non-field errors.
return {
api_settings.NON_FIELD_ERRORS_KEY: detail
}
# Errors raised as a string are non-field errors.
return {
api_settings.NON_FIELD_ERRORS_KEY: [detail]
}
@six.add_metaclass(SerializerMetaclass)
class Serializer(BaseSerializer):
default_error_messages = {
'invalid': _('Invalid data. Expected a dictionary, but got {datatype}.')
}
@property
def fields(self):
"""
A dictionary of {field_name: field_instance}.
"""
# `fields` is evaluated lazily. We do this to ensure that we don't
# have issues importing modules that use ModelSerializers as fields,
# even if Django's app-loading stage has not yet run.
if not hasattr(self, '_fields'):
self._fields = BindingDict(self)
for key, value in self.get_fields().items():
self._fields[key] = value
return self._fields
@cached_property
def _writable_fields(self):
return [
field for field in self.fields.values()
if (not field.read_only) or (field.default is not empty)
]
@cached_property
def _readable_fields(self):
return [
field for field in self.fields.values()
if not field.write_only
]
def get_fields(self):
"""
Returns a dictionary of {field_name: field_instance}.
"""
# Every new serializer is created with a clone of the field instances.
# This allows users to dynamically modify the fields on a serializer
# instance without affecting every other serializer class.
return copy.deepcopy(self._declared_fields)
def get_validators(self):
"""
Returns a list of validator callables.
"""
# Used by the lazily-evaluated `validators` property.
meta = getattr(self, 'Meta', None)
validators = getattr(meta, 'validators', None)
return validators[:] if validators else []
def get_initial(self):
if hasattr(self, 'initial_data'):
return OrderedDict([
(field_name, field.get_value(self.initial_data))
for field_name, field in self.fields.items()
if (field.get_value(self.initial_data) is not empty) and
not field.read_only
])
return OrderedDict([
(field.field_name, field.get_initial())
for field in self.fields.values()
if not field.read_only
])
def get_value(self, dictionary):
# We override the default field access in order to support
# nested HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_dict(dictionary, prefix=self.field_name) or empty
return dictionary.get(self.field_name, empty)
def run_validation(self, data=empty):
"""
We override the default `run_validation`, because the validation
performed by validators and the `.validate()` method should
        be coerced into an error dictionary with a 'non_field_errors' key.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
try:
self.run_validators(value)
value = self.validate(value)
assert value is not None, '.validate() should return the validated data'
except (ValidationError, DjangoValidationError) as exc:
raise ValidationError(detail=as_serializer_error(exc))
return value
def to_internal_value(self, data):
"""
Dict of native values <- Dict of primitive datatypes.
"""
if not isinstance(data, dict):
message = self.error_messages['invalid'].format(
datatype=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='invalid')
ret = OrderedDict()
errors = OrderedDict()
fields = self._writable_fields
for field in fields:
validate_method = getattr(self, 'validate_' + field.field_name, None)
primitive_value = field.get_value(data)
try:
validated_value = field.run_validation(primitive_value)
if validate_method is not None:
validated_value = validate_method(validated_value)
except ValidationError as exc:
errors[field.field_name] = exc.detail
except DjangoValidationError as exc:
errors[field.field_name] = get_error_detail(exc)
except SkipField:
pass
else:
set_value(ret, field.source_attrs, validated_value)
if errors:
raise ValidationError(errors)
return ret
def to_representation(self, instance):
"""
Object instance -> Dict of primitive datatypes.
"""
ret = OrderedDict()
fields = self._readable_fields
for field in fields:
try:
attribute = field.get_attribute(instance)
except SkipField:
continue
# We skip `to_representation` for `None` values so that fields do
# not have to explicitly deal with that case.
#
# For related fields with `use_pk_only_optimization` we need to
# resolve the pk value.
check_for_none = attribute.pk if isinstance(attribute, PKOnlyObject) else attribute
if check_for_none is None:
ret[field.field_name] = None
else:
ret[field.field_name] = field.to_representation(attribute)
return ret
def validate(self, attrs):
return attrs
def __repr__(self):
return unicode_to_repr(representation.serializer_repr(self, indent=1))
# The following are used for accessing `BoundField` instances on the
# serializer, for the purposes of presenting a form-like API onto the
# field values and field errors.
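    # For example (illustrative only): `serializer['email']` returns a
    # `BoundField` exposing the field's value and errors for form rendering.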
def __iter__(self):
for field in self.fields.values():
yield self[field.field_name]
def __getitem__(self, key):
field = self.fields[key]
value = self.data.get(key)
error = self.errors.get(key) if hasattr(self, '_errors') else None
if isinstance(field, Serializer):
return NestedBoundField(field, value, error)
return BoundField(field, value, error)
# Include a backlink to the serializer class on return objects.
# Allows renderers such as HTMLFormRenderer to get the full field info.
@property
def data(self):
ret = super(Serializer, self).data
return ReturnDict(ret, serializer=self)
@property
def errors(self):
ret = super(Serializer, self).errors
if isinstance(ret, list) and len(ret) == 1 and getattr(ret[0], 'code', None) == 'null':
# Edge case. Provide a more descriptive error than
# "this field may not be null", when no data is passed.
detail = ErrorDetail('No data provided', code='null')
ret = {api_settings.NON_FIELD_ERRORS_KEY: [detail]}
return ReturnDict(ret, serializer=self)
# There's some replication of `ListField` here,
# but that's probably better than obfuscating the call hierarchy.
class ListSerializer(BaseSerializer):
child = None
many = True
default_error_messages = {
'not_a_list': _('Expected a list of items but got type "{input_type}".'),
'empty': _('This list may not be empty.')
}
def __init__(self, *args, **kwargs):
self.child = kwargs.pop('child', copy.deepcopy(self.child))
self.allow_empty = kwargs.pop('allow_empty', True)
assert self.child is not None, '`child` is a required argument.'
assert not inspect.isclass(self.child), '`child` has not been instantiated.'
super(ListSerializer, self).__init__(*args, **kwargs)
self.child.bind(field_name='', parent=self)
def get_initial(self):
if hasattr(self, 'initial_data'):
return self.to_representation(self.initial_data)
return []
def get_value(self, dictionary):
"""
Given the input dictionary, return the field value.
"""
# We override the default field access in order to support
# lists in HTML forms.
if html.is_html_input(dictionary):
return html.parse_html_list(dictionary, prefix=self.field_name)
return dictionary.get(self.field_name, empty)
def run_validation(self, data=empty):
"""
We override the default `run_validation`, because the validation
performed by validators and the `.validate()` method should
        be coerced into an error dictionary with a 'non_field_errors' key.
"""
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
value = self.to_internal_value(data)
try:
self.run_validators(value)
value = self.validate(value)
assert value is not None, '.validate() should return the validated data'
except (ValidationError, DjangoValidationError) as exc:
raise ValidationError(detail=as_serializer_error(exc))
return value
def to_internal_value(self, data):
"""
List of dicts of native values <- List of dicts of primitive datatypes.
"""
if html.is_html_input(data):
data = html.parse_html_list(data)
if not isinstance(data, list):
message = self.error_messages['not_a_list'].format(
input_type=type(data).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='not_a_list')
if not self.allow_empty and len(data) == 0:
message = self.error_messages['empty']
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='empty')
ret = []
errors = []
for item in data:
try:
validated = self.child.run_validation(item)
except ValidationError as exc:
errors.append(exc.detail)
else:
ret.append(validated)
errors.append({})
if any(errors):
raise ValidationError(errors)
return ret
def to_representation(self, data):
"""
List of object instances -> List of dicts of primitive datatypes.
"""
# Dealing with nested relationships, data can be a Manager,
# so, first get a queryset from the Manager if needed
iterable = data.all() if isinstance(data, models.Manager) else data
return [
self.child.to_representation(item) for item in iterable
]
def validate(self, attrs):
return attrs
def update(self, instance, validated_data):
raise NotImplementedError(
"Serializers with many=True do not support multiple update by "
"default, only multiple create. For updates it is unclear how to "
"deal with insertions and deletions. If you need to support "
"multiple update, use a `ListSerializer` class and override "
"`.update()` so you can specify the behavior exactly."
)
def create(self, validated_data):
return [
self.child.create(attrs) for attrs in validated_data
]
def save(self, **kwargs):
"""
Save and return a list of object instances.
"""
# Guard against incorrect use of `serializer.save(commit=False)`
assert 'commit' not in kwargs, (
"'commit' is not a valid keyword argument to the 'save()' method. "
"If you need to access data before committing to the database then "
"inspect 'serializer.validated_data' instead. "
"You can also pass additional keyword arguments to 'save()' if you "
"need to set extra attributes on the saved model instance. "
"For example: 'serializer.save(owner=request.user)'.'"
)
validated_data = [
dict(list(attrs.items()) + list(kwargs.items()))
for attrs in self.validated_data
]
if self.instance is not None:
self.instance = self.update(self.instance, validated_data)
assert self.instance is not None, (
'`update()` did not return an object instance.'
)
else:
self.instance = self.create(validated_data)
assert self.instance is not None, (
'`create()` did not return an object instance.'
)
return self.instance
def is_valid(self, raise_exception=False):
# This implementation is the same as the default,
# except that we use lists, rather than dicts, as the empty case.
assert hasattr(self, 'initial_data'), (
'Cannot call `.is_valid()` as no `data=` keyword argument was '
'passed when instantiating the serializer instance.'
)
if not hasattr(self, '_validated_data'):
try:
self._validated_data = self.run_validation(self.initial_data)
except ValidationError as exc:
self._validated_data = []
self._errors = exc.detail
else:
self._errors = []
if self._errors and raise_exception:
raise ValidationError(self.errors)
return not bool(self._errors)
def __repr__(self):
return unicode_to_repr(representation.list_repr(self, indent=1))
# Include a backlink to the serializer class on return objects.
# Allows renderers such as HTMLFormRenderer to get the full field info.
@property
def data(self):
ret = super(ListSerializer, self).data
return ReturnList(ret, serializer=self)
@property
def errors(self):
ret = super(ListSerializer, self).errors
if isinstance(ret, list) and len(ret) == 1 and getattr(ret[0], 'code', None) == 'null':
# Edge case. Provide a more descriptive error than
# "this field may not be null", when no data is passed.
detail = ErrorDetail('No data provided', code='null')
ret = {api_settings.NON_FIELD_ERRORS_KEY: [detail]}
if isinstance(ret, dict):
return ReturnDict(ret, serializer=self)
return ReturnList(ret, serializer=self)
# ModelSerializer & HyperlinkedModelSerializer
# --------------------------------------------
def raise_errors_on_nested_writes(method_name, serializer, validated_data):
"""
Give explicit errors when users attempt to pass writable nested data.
If we don't do this explicitly they'd get a less helpful error when
calling `.save()` on the serializer.
We don't *automatically* support these sorts of nested writes because
there are too many ambiguities to define a default behavior.
Eg. Suppose we have a `UserSerializer` with a nested profile. How should
we handle the case of an update, where the `profile` relationship does
not exist? Any of the following might be valid:
* Raise an application error.
* Silently ignore the nested part of the update.
* Automatically create a profile instance.
"""
# Ensure we don't have a writable nested field. For example:
#
# class UserSerializer(ModelSerializer):
# ...
# profile = ProfileSerializer()
assert not any(
isinstance(field, BaseSerializer) and
(field.source in validated_data) and
isinstance(validated_data[field.source], (list, dict))
for key, field in serializer.fields.items()
), (
'The `.{method_name}()` method does not support writable nested '
'fields by default.\nWrite an explicit `.{method_name}()` method for '
'serializer `{module}.{class_name}`, or set `read_only=True` on '
'nested serializer fields.'.format(
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
# Ensure we don't have a writable dotted-source field. For example:
#
# class UserSerializer(ModelSerializer):
# ...
# address = serializer.CharField('profile.address')
assert not any(
'.' in field.source and
(key in validated_data) and
isinstance(validated_data[key], (list, dict))
for key, field in serializer.fields.items()
), (
'The `.{method_name}()` method does not support writable dotted-source '
'fields by default.\nWrite an explicit `.{method_name}()` method for '
'serializer `{module}.{class_name}`, or set `read_only=True` on '
'dotted-source serializer fields.'.format(
method_name=method_name,
module=serializer.__class__.__module__,
class_name=serializer.__class__.__name__
)
)
class ModelSerializer(Serializer):
"""
A `ModelSerializer` is just a regular `Serializer`, except that:
* A set of default fields are automatically populated.
* A set of default validators are automatically populated.
* Default `.create()` and `.update()` implementations are provided.
The process of automatically determining a set of serializer fields
based on the model fields is reasonably complex, but you almost certainly
don't need to dig into the implementation.
If the `ModelSerializer` class *doesn't* generate the set of fields that
you need you should either declare the extra/differing fields explicitly on
the serializer class, or simply use a `Serializer` class.
"""
serializer_field_mapping = {
models.AutoField: IntegerField,
models.BigIntegerField: IntegerField,
models.BooleanField: BooleanField,
models.CharField: CharField,
models.CommaSeparatedIntegerField: CharField,
models.DateField: DateField,
models.DateTimeField: DateTimeField,
models.DecimalField: DecimalField,
models.EmailField: EmailField,
models.Field: ModelField,
models.FileField: FileField,
models.FloatField: FloatField,
models.ImageField: ImageField,
models.IntegerField: IntegerField,
models.NullBooleanField: NullBooleanField,
models.PositiveIntegerField: IntegerField,
models.PositiveSmallIntegerField: IntegerField,
models.SlugField: SlugField,
models.SmallIntegerField: IntegerField,
models.TextField: CharField,
models.TimeField: TimeField,
models.URLField: URLField,
models.GenericIPAddressField: IPAddressField,
models.FilePathField: FilePathField,
}
if ModelDurationField is not None:
serializer_field_mapping[ModelDurationField] = DurationField
if ModelJSONField is not None:
serializer_field_mapping[ModelJSONField] = JSONField
serializer_related_field = PrimaryKeyRelatedField
serializer_related_to_field = SlugRelatedField
serializer_url_field = HyperlinkedIdentityField
serializer_choice_field = ChoiceField
# The field name for hyperlinked identity fields. Defaults to 'url'.
# You can modify this using the API setting.
#
    # Note that if you instead need to modify this on a per-serializer basis,
# you'll also need to ensure you update the `create` method on any generic
# views, to correctly handle the 'Location' response header for
# "HTTP 201 Created" responses.
url_field_name = None
# Default `create` and `update` behavior...
def create(self, validated_data):
"""
We have a bit of extra checking around this in order to provide
descriptive messages when something goes wrong, but this method is
essentially just:
return ExampleModel.objects.create(**validated_data)
If there are many to many fields present on the instance then they
cannot be set until the model is instantiated, in which case the
implementation is like so:
example_relationship = validated_data.pop('example_relationship')
instance = ExampleModel.objects.create(**validated_data)
instance.example_relationship = example_relationship
return instance
The default implementation also does not handle nested relationships.
If you want to support writable nested relationships you'll need
to write an explicit `.create()` method.
"""
raise_errors_on_nested_writes('create', self, validated_data)
ModelClass = self.Meta.model
# Remove many-to-many relationships from validated_data.
# They are not valid arguments to the default `.create()` method,
# as they require that the instance has already been saved.
info = model_meta.get_field_info(ModelClass)
many_to_many = {}
for field_name, relation_info in info.relations.items():
if relation_info.to_many and (field_name in validated_data):
many_to_many[field_name] = validated_data.pop(field_name)
try:
instance = ModelClass.objects.create(**validated_data)
except TypeError:
tb = traceback.format_exc()
msg = (
'Got a `TypeError` when calling `%s.objects.create()`. '
'This may be because you have a writable field on the '
'serializer class that is not a valid argument to '
'`%s.objects.create()`. You may need to make the field '
'read-only, or override the %s.create() method to handle '
'this correctly.\nOriginal exception was:\n %s' %
(
ModelClass.__name__,
ModelClass.__name__,
self.__class__.__name__,
tb
)
)
raise TypeError(msg)
# Save many-to-many relationships after the instance is created.
if many_to_many:
for field_name, value in many_to_many.items():
set_many(instance, field_name, value)
return instance
def update(self, instance, validated_data):
raise_errors_on_nested_writes('update', self, validated_data)
info = model_meta.get_field_info(instance)
# Simply set each attribute on the instance, and then save it.
# Note that unlike `.create()` we don't need to treat many-to-many
# relationships as being a special case. During updates we already
# have an instance pk for the relationships to be associated with.
for attr, value in validated_data.items():
if attr in info.relations and info.relations[attr].to_many:
set_many(instance, attr, value)
else:
setattr(instance, attr, value)
instance.save()
return instance
# Determine the fields to apply...
def get_fields(self):
"""
Return the dict of field names -> field instances that should be
used for `self.fields` when instantiating the serializer.
"""
if self.url_field_name is None:
self.url_field_name = api_settings.URL_FIELD_NAME
assert hasattr(self, 'Meta'), (
'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=self.__class__.__name__
)
)
assert hasattr(self.Meta, 'model'), (
'Class {serializer_class} missing "Meta.model" attribute'.format(
serializer_class=self.__class__.__name__
)
)
if model_meta.is_abstract_model(self.Meta.model):
raise ValueError(
'Cannot use ModelSerializer with Abstract Models.'
)
declared_fields = copy.deepcopy(self._declared_fields)
model = getattr(self.Meta, 'model')
depth = getattr(self.Meta, 'depth', 0)
if depth is not None:
assert depth >= 0, "'depth' may not be negative."
assert depth <= 10, "'depth' may not be greater than 10."
# Retrieve metadata about fields & relationships on the model class.
info = model_meta.get_field_info(model)
field_names = self.get_field_names(declared_fields, info)
# Determine any extra field arguments and hidden fields that
# should be included
extra_kwargs = self.get_extra_kwargs()
extra_kwargs, hidden_fields = self.get_uniqueness_extra_kwargs(
field_names, declared_fields, extra_kwargs
)
# Determine the fields that should be included on the serializer.
fields = OrderedDict()
for field_name in field_names:
# If the field is explicitly declared on the class then use that.
if field_name in declared_fields:
fields[field_name] = declared_fields[field_name]
continue
# Determine the serializer field class and keyword arguments.
field_class, field_kwargs = self.build_field(
field_name, info, model, depth
)
# Include any kwargs defined in `Meta.extra_kwargs`
extra_field_kwargs = extra_kwargs.get(field_name, {})
field_kwargs = self.include_extra_kwargs(
field_kwargs, extra_field_kwargs
)
# Create the serializer field.
fields[field_name] = field_class(**field_kwargs)
# Add in any hidden fields.
fields.update(hidden_fields)
return fields
# Methods for determining the set of field names to include...
def get_field_names(self, declared_fields, info):
"""
Returns the list of all field names that should be created when
instantiating this serializer class. This is based on the default
set of fields, but also takes into account the `Meta.fields` or
`Meta.exclude` options if they have been specified.
"""
fields = getattr(self.Meta, 'fields', None)
exclude = getattr(self.Meta, 'exclude', None)
if fields and fields != ALL_FIELDS and not isinstance(fields, (list, tuple)):
raise TypeError(
'The `fields` option must be a list or tuple or "__all__". '
'Got %s.' % type(fields).__name__
)
if exclude and not isinstance(exclude, (list, tuple)):
raise TypeError(
'The `exclude` option must be a list or tuple. Got %s.' %
type(exclude).__name__
)
assert not (fields and exclude), (
"Cannot set both 'fields' and 'exclude' options on "
"serializer {serializer_class}.".format(
serializer_class=self.__class__.__name__
)
)
assert not (fields is None and exclude is None), (
"Creating a ModelSerializer without either the 'fields' attribute "
"or the 'exclude' attribute has been deprecated since 3.3.0, "
"and is now disallowed. Add an explicit fields = '__all__' to the "
"{serializer_class} serializer.".format(
serializer_class=self.__class__.__name__
),
)
if fields == ALL_FIELDS:
fields = None
if fields is not None:
# Ensure that all declared fields have also been included in the
# `Meta.fields` option.
            # Do not require any fields that are declared on a parent class,
# in order to allow serializer subclasses to only include
# a subset of fields.
required_field_names = set(declared_fields)
for cls in self.__class__.__bases__:
required_field_names -= set(getattr(cls, '_declared_fields', []))
for field_name in required_field_names:
assert field_name in fields, (
"The field '{field_name}' was declared on serializer "
"{serializer_class}, but has not been included in the "
"'fields' option.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
return fields
# Use the default set of field names if `Meta.fields` is not specified.
fields = self.get_default_field_names(declared_fields, info)
if exclude is not None:
# If `Meta.exclude` is included, then remove those fields.
for field_name in exclude:
assert field_name in fields, (
"The field '{field_name}' was included on serializer "
"{serializer_class} in the 'exclude' option, but does "
"not match any model field.".format(
field_name=field_name,
serializer_class=self.__class__.__name__
)
)
fields.remove(field_name)
return fields
def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[model_info.pk.name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
)
# Methods for constructing serializer fields...
def build_field(self, field_name, info, model_class, nested_depth):
"""
Return a two tuple of (cls, kwargs) to build a serializer field with.
"""
if field_name in info.fields_and_pk:
model_field = info.fields_and_pk[field_name]
return self.build_standard_field(field_name, model_field)
elif field_name in info.relations:
relation_info = info.relations[field_name]
if not nested_depth:
return self.build_relational_field(field_name, relation_info)
else:
return self.build_nested_field(field_name, relation_info, nested_depth)
elif hasattr(model_class, field_name):
return self.build_property_field(field_name, model_class)
elif field_name == self.url_field_name:
return self.build_url_field(field_name, model_class)
return self.build_unknown_field(field_name, model_class)
def build_standard_field(self, field_name, model_field):
"""
Create regular model fields.
"""
field_mapping = ClassLookupDict(self.serializer_field_mapping)
field_class = field_mapping[model_field]
field_kwargs = get_field_kwargs(field_name, model_field)
if 'choices' in field_kwargs:
# Fields with choices get coerced into `ChoiceField`
# instead of using their regular typed field.
field_class = self.serializer_choice_field
# Some model fields may introduce kwargs that would not be valid
# for the choice field. We need to strip these out.
# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)
valid_kwargs = set((
'read_only', 'write_only',
'required', 'default', 'initial', 'source',
'label', 'help_text', 'style',
'error_messages', 'validators', 'allow_null', 'allow_blank',
'choices'
))
for key in list(field_kwargs.keys()):
if key not in valid_kwargs:
field_kwargs.pop(key)
if not issubclass(field_class, ModelField):
# `model_field` is only valid for the fallback case of
# `ModelField`, which is used when no other typed field
# matched to the model field.
field_kwargs.pop('model_field', None)
if not issubclass(field_class, CharField) and not issubclass(field_class, ChoiceField):
# `allow_blank` is only valid for textual fields.
field_kwargs.pop('allow_blank', None)
if postgres_fields and isinstance(model_field, postgres_fields.ArrayField):
# Populate the `child` argument on `ListField` instances generated
            # for the PostgreSQL-specific `ArrayField`.
child_model_field = model_field.base_field
child_field_class, child_field_kwargs = self.build_standard_field(
'child', child_model_field
)
field_kwargs['child'] = child_field_class(**child_field_kwargs)
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
"""
Create fields for forward and reverse relationships.
"""
field_class = self.serializer_related_field
field_kwargs = get_relation_kwargs(field_name, relation_info)
to_field = field_kwargs.pop('to_field', None)
if to_field and not relation_info.reverse and not relation_info.related_model._meta.get_field(to_field).primary_key:
field_kwargs['slug_field'] = to_field
field_class = self.serializer_related_to_field
# `view_name` is only valid for hyperlinked relationships.
if not issubclass(field_class, HyperlinkedRelatedField):
field_kwargs.pop('view_name', None)
return field_class, field_kwargs
def build_nested_field(self, field_name, relation_info, nested_depth):
"""
Create nested fields for forward and reverse relationships.
"""
class NestedSerializer(ModelSerializer):
class Meta:
model = relation_info.related_model
depth = nested_depth - 1
fields = '__all__'
field_class = NestedSerializer
field_kwargs = get_nested_relation_kwargs(relation_info)
return field_class, field_kwargs
def build_property_field(self, field_name, model_class):
"""
Create a read only field for model methods and properties.
"""
field_class = ReadOnlyField
field_kwargs = {}
return field_class, field_kwargs
def build_url_field(self, field_name, model_class):
"""
Create a field representing the object's own URL.
"""
field_class = self.serializer_url_field
field_kwargs = get_url_kwargs(model_class)
return field_class, field_kwargs
def build_unknown_field(self, field_name, model_class):
"""
Raise an error on any unknown fields.
"""
raise ImproperlyConfigured(
'Field name `%s` is not valid for model `%s`.' %
(field_name, model_class.__name__)
)
def include_extra_kwargs(self, kwargs, extra_kwargs):
"""
Include any 'extra_kwargs' that have been included for this field,
possibly removing any incompatible existing keyword arguments.
"""
if extra_kwargs.get('read_only', False):
for attr in [
'required', 'default', 'allow_blank', 'allow_null',
'min_length', 'max_length', 'min_value', 'max_value',
'validators', 'queryset'
]:
kwargs.pop(attr, None)
if extra_kwargs.get('default') and kwargs.get('required') is False:
kwargs.pop('required')
if extra_kwargs.get('read_only', kwargs.get('read_only', False)):
extra_kwargs.pop('required', None) # Read only fields should always omit the 'required' argument.
kwargs.update(extra_kwargs)
return kwargs
# Methods for determining additional keyword arguments to apply...
def get_extra_kwargs(self):
"""
Return a dictionary mapping field names to a dictionary of
additional keyword arguments.
"""
extra_kwargs = copy.deepcopy(getattr(self.Meta, 'extra_kwargs', {}))
read_only_fields = getattr(self.Meta, 'read_only_fields', None)
if read_only_fields is not None:
if not isinstance(read_only_fields, (list, tuple)):
raise TypeError(
'The `read_only_fields` option must be a list or tuple. '
'Got %s.' % type(read_only_fields).__name__
)
for field_name in read_only_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
def get_uniqueness_extra_kwargs(self, field_names, declared_fields, extra_kwargs):
"""
Return any additional field options that need to be included as a
result of uniqueness constraints on the model. This is returned as
a two-tuple of:
('dict of updated extra kwargs', 'mapping of hidden fields')
"""
if getattr(self.Meta, 'validators', None) is not None:
return (extra_kwargs, {})
model = getattr(self.Meta, 'model')
model_fields = self._get_model_fields(
field_names, declared_fields, extra_kwargs
)
# Determine if we need any additional `HiddenField` or extra keyword
# arguments to deal with `unique_for` dates that are required to
# be in the input data in order to validate it.
unique_constraint_names = set()
for model_field in model_fields.values():
# Include each of the `unique_for_*` field names.
unique_constraint_names |= {model_field.unique_for_date, model_field.unique_for_month,
model_field.unique_for_year}
unique_constraint_names -= {None}
# Include each of the `unique_together` field names,
# so long as all the field names are included on the serializer.
for parent_class in [model] + list(model._meta.parents.keys()):
for unique_together_list in parent_class._meta.unique_together:
if set(field_names).issuperset(set(unique_together_list)):
unique_constraint_names |= set(unique_together_list)
# Now we have all the field names that have uniqueness constraints
# applied, we can add the extra 'required=...' or 'default=...'
# arguments that are appropriate to these fields, or add a `HiddenField` for it.
hidden_fields = {}
uniqueness_extra_kwargs = {}
for unique_constraint_name in unique_constraint_names:
            # Get the model field that is referred to.
unique_constraint_field = model._meta.get_field(unique_constraint_name)
if getattr(unique_constraint_field, 'auto_now_add', None):
default = CreateOnlyDefault(timezone.now)
elif getattr(unique_constraint_field, 'auto_now', None):
default = timezone.now
elif unique_constraint_field.has_default():
default = unique_constraint_field.default
else:
default = empty
if unique_constraint_name in model_fields:
# The corresponding field is present in the serializer
if default is empty:
uniqueness_extra_kwargs[unique_constraint_name] = {'required': True}
else:
uniqueness_extra_kwargs[unique_constraint_name] = {'default': default}
elif default is not empty:
# The corresponding field is not present in the
# serializer. We have a default to use for it, so
# add in a hidden field that populates it.
hidden_fields[unique_constraint_name] = HiddenField(default=default)
# Update `extra_kwargs` with any new options.
for key, value in uniqueness_extra_kwargs.items():
if key in extra_kwargs:
value.update(extra_kwargs[key])
extra_kwargs[key] = value
return extra_kwargs, hidden_fields
def _get_model_fields(self, field_names, declared_fields, extra_kwargs):
"""
Returns all the model fields that are being mapped to by fields
on the serializer class.
Returned as a dict of 'model field name' -> 'model field'.
Used internally by `get_uniqueness_field_options`.
"""
model = getattr(self.Meta, 'model')
model_fields = {}
for field_name in field_names:
if field_name in declared_fields:
# If the field is declared on the serializer
field = declared_fields[field_name]
source = field.source or field_name
else:
try:
source = extra_kwargs[field_name]['source']
except KeyError:
source = field_name
if '.' in source or source == '*':
# Model fields will always have a simple source mapping,
# they can't be nested attribute lookups.
continue
try:
field = model._meta.get_field(source)
if isinstance(field, DjangoModelField):
model_fields[source] = field
except FieldDoesNotExist:
pass
return model_fields
# Determine the validators to apply...
def get_validators(self):
"""
Determine the set of validators to use when instantiating serializer.
"""
# If the validators have been declared explicitly then use that.
validators = getattr(getattr(self, 'Meta', None), 'validators', None)
if validators is not None:
return validators[:]
# Otherwise use the default set of validators.
return (
self.get_unique_together_validators() +
self.get_unique_for_date_validators()
)
def get_unique_together_validators(self):
"""
Determine a default set of validators for any unique_together constraints.
"""
model_class_inheritance_tree = (
[self.Meta.model] +
list(self.Meta.model._meta.parents.keys())
)
        # The field names we're passing through here only include fields
# which may map onto a model field. Any dotted field name lookups
# cannot map to a field, and must be a traversal, so we're not
# including those.
field_names = {
field.source for field in self._writable_fields
if (field.source != '*') and ('.' not in field.source)
}
# Note that we make sure to check `unique_together` both on the
# base model class, but also on any parent classes.
validators = []
for parent_class in model_class_inheritance_tree:
for unique_together in parent_class._meta.unique_together:
if field_names.issuperset(set(unique_together)):
validator = UniqueTogetherValidator(
queryset=parent_class._default_manager,
fields=unique_together
)
validators.append(validator)
return validators
def get_unique_for_date_validators(self):
"""
Determine a default set of validators for the following constraints:
* unique_for_date
* unique_for_month
* unique_for_year
"""
info = model_meta.get_field_info(self.Meta.model)
default_manager = self.Meta.model._default_manager
field_names = [field.source for field in self.fields.values()]
validators = []
for field_name, field in info.fields_and_pk.items():
if field.unique_for_date and field_name in field_names:
validator = UniqueForDateValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_date
)
validators.append(validator)
if field.unique_for_month and field_name in field_names:
validator = UniqueForMonthValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_month
)
validators.append(validator)
if field.unique_for_year and field_name in field_names:
validator = UniqueForYearValidator(
queryset=default_manager,
field=field_name,
date_field=field.unique_for_year
)
validators.append(validator)
return validators
if hasattr(models, 'UUIDField'):
ModelSerializer.serializer_field_mapping[models.UUIDField] = UUIDField
# IPAddressField is deprecated in Django
if hasattr(models, 'IPAddressField'):
ModelSerializer.serializer_field_mapping[models.IPAddressField] = IPAddressField
if postgres_fields:
class CharMappingField(DictField):
child = CharField(allow_blank=True)
ModelSerializer.serializer_field_mapping[postgres_fields.HStoreField] = CharMappingField
ModelSerializer.serializer_field_mapping[postgres_fields.ArrayField] = ListField
class HyperlinkedModelSerializer(ModelSerializer):
"""
A type of `ModelSerializer` that uses hyperlinked relationships instead
of primary key relationships. Specifically:
* A 'url' field is included instead of the 'id' field.
* Relationships to other instances are hyperlinks, instead of primary keys.
"""
serializer_related_field = HyperlinkedRelatedField
def get_default_field_names(self, declared_fields, model_info):
"""
Return the default list of field names that will be used if the
`Meta.fields` option is not specified.
"""
return (
[self.url_field_name] +
list(declared_fields.keys()) +
list(model_info.fields.keys()) +
list(model_info.forward_relations.keys())
)
def build_nested_field(self, field_name, relation_info, nested_depth):
"""
Create nested fields for forward and reverse relationships.
"""
class NestedSerializer(HyperlinkedModelSerializer):
class Meta:
model = relation_info.related_model
depth = nested_depth - 1
fields = '__all__'
field_class = NestedSerializer
field_kwargs = get_nested_relation_kwargs(relation_info)
return field_class, field_kwargs
| gpl-3.0 |
LTD-Beget/sprutio-rpc | lib/FileManager/workers/webdav/moveFromWebDav.py | 1 | 7786 | import os
import shutil
import threading
import time
import traceback
from lib.FileManager.FM import REQUEST_DELAY
from lib.FileManager.WebDavConnection import WebDavConnection
from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer
class MoveFromWebDav(BaseWorkerCustomer):
def __init__(self, source, target, paths, overwrite, *args, **kwargs):
super(MoveFromWebDav, self).__init__(*args, **kwargs)
self.source = source
self.target = target
self.paths = paths
self.overwrite = overwrite
self.webdav = WebDavConnection.create(self.login, self.source.get('server_id'), self.logger)
self.operation_progress = {
"total_done": False,
"total": 0,
"operation_done": False,
"processed": 0,
"previous_percent": 0
}
def run(self):
try:
self.preload()
success_paths = []
error_paths = []
source_path = self.source.get('path')
target_path = self.target.get('path')
if source_path is None:
raise Exception("Source path empty")
if target_path is None:
raise Exception("Target path empty")
target_path = self.get_abs_path(target_path)
self.logger.info("MoveFromWebDav process run source = %s , target = %s" % (source_path, target_path))
t_total = threading.Thread(target=self.get_total, args=(self.operation_progress, self.paths))
t_total.start()
for path in self.paths:
try:
download_path = target_path
if self.webdav.isdir(path):
path += '/'
download_path += path.replace(self.webdav.parent(path), "/", 1)
download_result = self.download_file_from_webdav(path, download_path, self.operation_progress)
if download_result['success']:
success_paths.append(path)
self.webdav.remove(path)
except Exception as e:
self.logger.error(
"Error copy %s , error %s , %s" % (str(path), str(e), traceback.format_exc()))
error_paths.append(path)
self.operation_progress["operation_done"] = True
result = {
"success": success_paths,
"errors": error_paths
}
            # otherwise it looks to users as if the copy did not complete fully
progress = {
'percent': round(float(len(success_paths)) / float(len(self.paths)), 2),
'text': str(int(round(float(len(success_paths)) / float(len(self.paths)), 2) * 100)) + '%'
}
time.sleep(REQUEST_DELAY)
self.on_success(self.status_id, data=result, progress=progress, pid=self.pid, pname=self.name)
except Exception as e:
result = {
"error": True,
"message": str(e),
"traceback": traceback.format_exc()
}
self.on_error(self.status_id, result, pid=self.pid, pname=self.name)
def download_file_from_webdav(self, path, target_path, operation_progress):
try:
download_result = {}
target_file = target_path + path
if not os.path.exists(target_file):
download_result = self.webdav.download(path, target_path, self.downloading_progress)
if not download_result['success'] or len(download_result['file_list']['failed']) > 0:
raise download_result['error'] if download_result[
'error'] is not None else Exception(
"Download error")
elif self.overwrite and os.path.exists(target_file) and not os.path.isdir(target_file):
download_result = self.webdav.download(path, target_path, self.downloading_progress)
if not download_result['success'] or len(download_result['file_list']['failed']) > 0:
raise download_result['error'] if download_result[
'error'] is not None else Exception(
"Download error")
elif self.overwrite and os.path.isdir(target_file):
"""
See https://docs.python.org/3.4/library/shutil.html?highlight=shutil#shutil.copy
                Handle copying a file when the destination is an existing directory.
"""
shutil.rmtree(target_file)
download_result = self.webdav.download(path, target_path, self.downloading_progress)
if not download_result['success'] or len(download_result['file_list']['failed']) > 0:
raise download_result['error'] if download_result[
'error'] is not None else Exception(
"Download error")
else:
pass
except Exception as e:
self.logger.info("Cannot move file %s , %s" % (path, str(e)))
raise e
finally:
operation_progress["processed"] += 1
return download_result
def get_total(self, progress_object, paths, count_files=True):
self.logger.debug("start get_total() files = %s" % count_files)
webdav = WebDavConnection.create(self.login, self.source.get('server_id'), self.logger)
for path in paths:
try:
self.recursive_total(webdav, path, progress_object)
except Exception as e:
self.logger.error("Error get_total file %s , error %s" % (str(path), str(e)))
continue
progress_object["total_done"] = True
self.logger.debug("done get_total(), found %s files" % progress_object.get("total"))
return
def recursive_total(self, webdav, path, progress_object):
if webdav.isfile(path):
progress_object["total"] += 1
else:
for file in webdav.listdir(path):
self.recursive_total(webdav, file, progress_object)
def downloading_progress(self, download_t, download_d, upload_t, upload_d):
try:
percent_download = 0
if download_t != 0:
percent_download = round(float(download_d) / float(download_t), 2)
if percent_download != self.operation_progress.get("previous_percent"):
if percent_download == 0 and self.operation_progress.get("previous_percent") != 0:
self.operation_progress["processed"] += 1
self.operation_progress["previous_percent"] = percent_download
total_percent = percent_download + self.operation_progress.get("processed")
denominator = 50
if self.operation_progress.get("total_done"):
denominator = self.operation_progress.get("total")
percent = round(float(total_percent) /
float(denominator), 2)
self.logger.debug("percentage changed to %s" % percent)
progress = {
'percent': percent,
'text': str(int(percent * 100)) + '%'
}
self.on_running(self.status_id, progress=progress, pid=self.pid, pname=self.name)
except Exception as ex:
self.logger.error("Error in MoveFromWebDav downloading_progress(): %s, traceback = %s" %
(str(ex), traceback.format_exc()))
| gpl-3.0 |
tszym/ansible | lib/ansible/module_utils/cloud.py | 3 | 8468 | #
# (c) 2016 Allen Sanabria, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module adds shared support for generic cloud modules
In order to use this module, include it as part of a custom
module as shown below.
from ansible.module_utils.cloud import *
The 'cloud' module provides the following common classes:
* CloudRetry
- The base class to be used by other cloud providers, in order to
provide a backoff/retry decorator based on status codes.
- Example using the AWSRetry class which inherits from CloudRetry.
@AWSRetry.exponential_backoff(retries=10, delay=3)
get_ec2_security_group_ids_from_names()
@AWSRetry.jittered_backoff()
get_ec2_security_group_ids_from_names()
"""
import random
from functools import wraps
import syslog
import time
from ansible.module_utils.pycompat24 import get_exception
def _exponential_backoff(retries=10, delay=2, backoff=2, max_delay=60):
""" Customizable exponential backoff strategy.
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Initial (base) delay.
backoff (float): base of the exponent to use for exponential
backoff.
max_delay (int): Optional. If provided each delay generated is capped
at this amount. Defaults to 60 seconds.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for an exponential backoff strategy.
Usage:
>>> backoff = _exponential_backoff()
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[2, 4, 8, 16, 32, 60, 60, 60, 60, 60]
"""
def backoff_gen():
for retry in range(0, retries):
sleep = delay * backoff ** retry
yield sleep if max_delay is None else min(sleep, max_delay)
return backoff_gen
def _full_jitter_backoff(retries=10, delay=3, max_delay=60, _random=random):
""" Implements the "Full Jitter" backoff strategy described here
https://www.awsarchitectureblog.com/2015/03/backoff.html
Args:
retries (int): Maximum number of times to retry a request.
delay (float): Approximate number of seconds to sleep for the first
retry.
max_delay (int): The maximum number of seconds to sleep for any retry.
_random (random.Random or None): Makes this generator testable by
allowing developers to explicitly pass in the a seeded Random.
Returns:
Callable that returns a generator. This generator yields durations in
seconds to be used as delays for a full jitter backoff strategy.
Usage:
>>> backoff = _full_jitter_backoff(retries=5)
>>> backoff
<function backoff_backoff at 0x7f0d939facf8>
>>> list(backoff())
[3, 6, 5, 23, 38]
>>> list(backoff())
[2, 1, 6, 6, 31]
"""
def backoff_gen():
for retry in range(0, retries):
yield _random.randint(0, min(max_delay, delay * 2 ** retry))
return backoff_gen
class CloudRetry(object):
""" CloudRetry can be used by any cloud provider, in order to implement a
backoff algorithm/retry effect based on Status Code from Exceptions.
"""
# This is the base class of the exception.
# AWS Example botocore.exceptions.ClientError
base_class = None
@staticmethod
def status_code_from_exception(error):
""" Return the status code from the exception object
Args:
error (object): The exception itself.
"""
pass
@staticmethod
def found(response_code):
""" Return True if the Response Code to retry on was found.
Args:
response_code (str): This is the Response Code that is being matched against.
"""
pass
@classmethod
def _backoff(cls, backoff_strategy):
""" Retry calling the Cloud decorated function using the provided
backoff strategy.
Args:
backoff_strategy (callable): Callable that returns a generator. The
generator should yield sleep times for each retry of the decorated
function.
"""
def deco(f):
@wraps(f)
def retry_func(*args, **kwargs):
for delay in backoff_strategy():
try:
return f(*args, **kwargs)
except Exception:
e = get_exception()
if isinstance(e, cls.base_class):
response_code = cls.status_code_from_exception(e)
if cls.found(response_code):
msg = "{0}: Retrying in {1} seconds...".format(str(e), delay)
syslog.syslog(syslog.LOG_INFO, msg)
time.sleep(delay)
else:
# Return original exception if exception is not a ClientError
raise e
else:
# Return original exception if exception is not a ClientError
raise e
return f(*args, **kwargs)
return retry_func # true decorator
return deco
@classmethod
def exponential_backoff(cls, retries=10, delay=3, backoff=2, max_delay=60):
"""
Retry calling the Cloud decorated function using an exponential backoff.
Kwargs:
retries (int): Number of times to retry a failed request before giving up
default=10
delay (int or float): Initial delay between retries in seconds
default=3
backoff (int or float): backoff multiplier e.g. value of 2 will
double the delay each retry
                default=2
max_delay (int or None): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_exponential_backoff(
retries=retries, delay=delay, backoff=backoff, max_delay=max_delay))
@classmethod
def jittered_backoff(cls, retries=10, delay=3, max_delay=60):
"""
Retry calling the Cloud decorated function using a jittered backoff
strategy. More on this strategy here:
https://www.awsarchitectureblog.com/2015/03/backoff.html
Kwargs:
retries (int): Number of times to retry a failed request before giving up
default=10
delay (int): Initial delay between retries in seconds
default=3
max_delay (int): maximum amount of time to wait between retries.
default=60
"""
return cls._backoff(_full_jitter_backoff(
retries=retries, delay=delay, max_delay=max_delay))
@classmethod
def backoff(cls, tries=10, delay=3, backoff=1.1):
"""
Retry calling the Cloud decorated function using an exponential backoff.
Compatibility for the original implementation of CloudRetry.backoff that
did not provide configurable backoff strategies. Developers should use
CloudRetry.exponential_backoff instead.
Kwargs:
tries (int): Number of times to try (not retry) before giving up
default=10
delay (int or float): Initial delay between retries in seconds
default=3
backoff (int or float): backoff multiplier e.g. value of 2 will
double the delay each retry
default=1.1
"""
return cls.exponential_backoff(
retries=tries - 1, delay=delay, backoff=backoff, max_delay=None)
| gpl-3.0 |
demarle/VTK | ThirdParty/Twisted/twisted/trial/__init__.py | 60 | 2053 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange
"""
Asynchronous unit testing framework.
Trial extends Python's builtin C{unittest} to provide support for asynchronous
tests.
Maintainer: Jonathan Lange
Trial strives to be compatible with other Python xUnit testing frameworks.
"Compatibility" is a difficult things to define. In practice, it means that:
- L{twisted.trial.unittest.TestCase} objects should be able to be used by
other test runners without those runners requiring special support for
Trial tests.
- Tests that subclass the standard library C{TestCase} and don't do anything
"too weird" should be able to be discoverable and runnable by the Trial
test runner without the authors of those tests having to jump through
hoops.
- Tests that implement the interface provided by the standard library
C{TestCase} should be runnable by the Trial runner.
- The Trial test runner and Trial L{unittest.TestCase} objects ought to be
able to use standard library C{TestResult} objects, and third party
C{TestResult} objects based on the standard library.
This list is not necessarily exhaustive -- compatibility is hard to define.
Contributors who discover more helpful ways of defining compatibility are
encouraged to update this document.
Examples:
B{Timeouts} for tests should be implemented in the runner. If this is done,
then timeouts could work for third-party TestCase objects as well as for
L{twisted.trial.unittest.TestCase} objects. Further, Twisted C{TestCase}
objects will run in other runners without timing out.
See U{http://twistedmatrix.com/trac/ticket/2675}.
Running tests in a temporary directory should be a feature of the test case,
because often tests themselves rely on this behaviour. If the feature is
implemented in the runner, then tests will change behaviour (possibly
breaking) when run in a different test runner. Further, many tests don't even
care about the filesystem.
See U{http://twistedmatrix.com/trac/ticket/2916}.
"""
| bsd-3-clause |
mrshelly/openerp71313 | openerp/addons/purchase_analytic_plans/__init__.py | 441 | 1220 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import purchase_analytic_plans
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rlindner81/pyload | module/plugins/crypter/RelinkUs.py | 1 | 11552 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import binascii
import re
import Crypto.Cipher.AES
from module.plugins.captcha.SolveMedia import SolveMedia
from module.plugins.internal.Captcha import Captcha
from module.plugins.internal.Crypter import Crypter
from module.plugins.internal.misc import fsjoin, replace_patterns
class RelinkUs(Crypter):
__name__ = "RelinkUs"
__type__ = "crypter"
__version__ = "3.20"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?relink\.(?:us|to)/(f/|((view|go)\.php\?id=))(?P<ID>.+)'
__config__ = [("activated", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("folder_per_package", "Default;Yes;No", "Create folder for each package", "Default")]
__description__ = """Relink.us decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("fragonib", "fragonib[AT]yahoo[DOT]es"),
("AndroKev", "[email protected]")]
URL_REPLACEMENTS = [(__pattern__ + '.*', r'http://relink.to/f/\g<ID>')]
PREFERRED_LINK_SOURCES = ["cnl2", "dlc", "web"]
OFFLINE_TOKEN = r'<title>Tattooside'
PASSWORD_TOKEN = r'container_password.php'
    PASSWORD_ERROR_TOKEN = r'You have entered an incorrect password'
PASSWORD_SUBMIT_URL = r'http://relink.to/container_password.php'
CAPTCHA_TOKEN = r'container_captcha.php'
CIRCLE_CAPTCHA_PATTERN = r'id="captcha_id" value="(\w+?)"'
    CAPTCHA_ERROR_TOKEN = r'You have solved the captcha wrong'
CIRCLE_CAPTCHA_IMG_URL = r'http://relink.to/core/captcha/circlecaptcha.php'
CAPTCHA_SUBMIT_URL = r'http://relink.to/container_captcha.php'
FILE_TITLE_PATTERN = r'<th>Title</th><td>(.*)</td></tr>'
FILE_NOTITLE = r'No title'
CNL2_FORM_PATTERN = r'<form id="cnl_form-(.*?)</form>'
CNL2_FORMINPUT_PATTERN = r'<input.*?name="%s".*?value="(.*?)"'
CNL2_JK_KEY = "jk"
CNL2_CRYPTED_KEY = "crypted"
DLC_LINK_PATTERN = r'<a href=".*?" class="dlc_button" target="_blank">'
DLC_DOWNLOAD_URL = r'http://relink.to/download.php'
WEB_FORWARD_PATTERN = r'getFile\(\'(.+)\'\)'
WEB_FORWARD_URL = r'http://relink.to/frame.php'
WEB_LINK_PATTERN = r'<iframe name="Container" height="100%" frameborder="no" width="100%" src="(.+)"></iframe>'
def setup(self):
self.file_id = None
self.package = None
self.captcha = Captcha(self.pyfile)
def decrypt(self, pyfile):
#: Init
self.init_package(pyfile)
#: Request package
self.request_package()
#: Check for online
if not self.is_online():
self.offline()
#: Check for protection
if self.is_password_protected():
self.unlock_password_protection()
self.handle_errors()
if self.is_captcha_protected():
self.unlock_captcha_protection()
self.handle_errors()
#: Get package name and folder
pack_name, folder_name = self.get_package_info()
#: Extract package links
pack_links = []
for sources in self.PREFERRED_LINK_SOURCES:
pack_links.extend(self.handle_link_source(sources))
if pack_links: #: Use only first source which provides links
break
pack_links = set(pack_links)
#: Pack
if pack_links:
self.packages = [(pack_name, pack_links, folder_name)]
def init_package(self, pyfile):
pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
self.file_id = re.match(self.__pattern__, pyfile.url).group('ID')
self.package = pyfile.package()
def request_package(self):
self.data = self.load(self.pyfile.url)
def is_online(self):
if self.OFFLINE_TOKEN in self.data:
self.log_debug("File not found")
return False
return True
    def is_password_protected(self):
        if self.PASSWORD_TOKEN in self.data:
            self.log_debug("Links are password protected")
            return True
        return False
def is_captcha_protected(self):
if self.CAPTCHA_TOKEN in self.data:
self.log_debug("Links are captcha protected")
return True
return False
def unlock_password_protection(self):
password = self.get_password()
self.log_debug(
"Submitting password [%s] for protected links" %
password)
if password:
passwd_url = self.PASSWORD_SUBMIT_URL + "?id=%s" % self.file_id
passwd_data = {
'id': self.file_id,
'password': password,
'pw': 'submit'}
self.data = self.load(passwd_url, post=passwd_data)
def unlock_captcha_protection(self):
m = re.search(self.CIRCLE_CAPTCHA_PATTERN, self.data)
if m:
self.log_debug("Request circle captcha resolving")
captcha_id = m.group(1)
coords = self.captcha.decrypt(
self.CIRCLE_CAPTCHA_IMG_URL,
get={
'id': captcha_id},
input_type="png",
output_type='positional') # , ocr="CircleCaptcha")
self.log_debug(
"Captcha resolved, coords (%s,%s)" %
(coords[0], coords[1]))
post_data = {'button.x': coords[0],
'button.y': coords[1],
'captcha_id': captcha_id,
'captcha_type': "RelinkCircle",
'captcha': "submit"}
else:
solvemedia = SolveMedia(self.pyfile)
captcha_key = solvemedia.detect_key()
if captcha_key:
self.log_debug(_("Request SolveMedia captcha resolving"))
response, challenge = solvemedia.challenge()
post_data = {'adcopy_response': response,
'adcopy_challenge': challenge,
'captcha_type': "Solvemedia",
'submit': "Continue",
'captcha': "submit"}
else:
self.log_error(_("Unknown captcha type detected"))
self.fail(_("Unknown captcha type"))
self.data = self.load(self.CAPTCHA_SUBMIT_URL,
ref=False, # ref=self.CAPTCHA_SUBMIT_URL + "&id=" + self.file_id,
get={'id': self.file_id},
post=post_data)
def get_package_info(self):
name = folder = None
#: Try to get info from web
# self.data = self.load(self.pyfile.url)
m = re.search(self.FILE_TITLE_PATTERN, self.data)
if m is not None:
title = m.group(1).strip()
if not self.FILE_NOTITLE in title:
name = folder = title
self.log_debug(
_("Found name [%s] and folder [%s] in package info") %
(name, folder))
#: Fallback to defaults
if not name or not folder:
name = self.package.name
folder = self.package.folder
self.log_debug(
_("Package info not found, defaulting to pyfile name [%s] and folder [%s]") %
(name, folder))
#: Return package info
return name, folder
def handle_errors(self):
        if self.PASSWORD_ERROR_TOKEN in self.data:
self.fail(_("Wrong password"))
if self.captcha.task:
            if self.CAPTCHA_ERROR_TOKEN in self.data:
self.retry_captcha()
else:
self.captcha.correct()
def handle_link_source(self, source):
if source == "cnl2":
return self.handle_CNL2Links()
elif source == "dlc":
return self.handle_DLC_links()
elif source == "web":
return self.handle_WEB_links()
else:
self.error(_('Unknown source type "%s"') % source)
def handle_CNL2Links(self):
self.log_debug(_("Search for CNL2 links"))
pack_links = []
m = re.search(self.CNL2_FORM_PATTERN, self.data, re.S)
if m is not None:
cnl2_form = m.group(1)
try:
(vcrypted, vjk) = self._get_cipher_params(cnl2_form)
for (crypted, jk) in zip(vcrypted, vjk):
pack_links.extend(self._get_links(crypted, jk))
except Exception:
self.log_debug(_("Unable to decrypt CNL2 links", trace=True))
return pack_links
def handle_DLC_links(self):
self.log_debug(_("Search for DLC links"))
pack_links = []
m = re.search(self.DLC_LINK_PATTERN, self.data)
if m is not None:
container_url = self.DLC_DOWNLOAD_URL + "?id=%s&dlc=1" % self.file_id
self.log_debug(
_("Downloading DLC container link [%s]") %
container_url)
try:
dlc = self.load(container_url)
dlc_filename = self.file_id + ".dlc"
dlc_filepath = fsjoin(
self.pyload.config.get(
'general',
'download_folder'),
dlc_filename)
with open(dlc_filepath, "wb") as f:
f.write(dlc)
pack_links.append(dlc_filepath)
except Exception:
self.fail(_("Unable to download DLC container"))
return pack_links
def handle_WEB_links(self):
self.log_debug(_("Search for WEB links"))
pack_links = []
params = re.findall(self.WEB_FORWARD_PATTERN, self.data)
self.log_debug(_("Decrypting %d Web links") % len(params))
for index, param in enumerate(params):
try:
url = self.WEB_FORWARD_URL + "?%s" % param
self.log_debug(
_("Decrypting Web link %d, %s") %
(index + 1, url))
res = self.load(url)
link = re.search(self.WEB_LINK_PATTERN, res).group(1)
pack_links.append(link)
except Exception as detail:
self.log_debug(
_("Error decrypting Web link %s, %s") %
(index, detail))
self.wait(4)
return pack_links
def _get_cipher_params(self, cnl2_form):
#: Get jk
jk_re = self.CNL2_FORMINPUT_PATTERN % self.CNL2_JK_KEY
vjk = re.findall(jk_re, cnl2_form, re.I)
#: Get crypted
crypted_re = self.CNL2_FORMINPUT_PATTERN % RelinkUs.CNL2_CRYPTED_KEY
vcrypted = re.findall(crypted_re, cnl2_form, re.I)
#: Log and return
self.log_debug(_("Detected %d crypted blocks") % len(vcrypted))
return vcrypted, vjk
def _get_links(self, crypted, jk):
#: Get key
jreturn = self.js.eval("%s f()" % jk)
self.log_debug(_("JsEngine returns value [%s]") % jreturn)
key = binascii.unhexlify(jreturn)
#: Decrypt
Key = key
IV = key
obj = Crypto.Cipher.AES.new(Key, Crypto.Cipher.AES.MODE_CBC, IV)
text = obj.decrypt(crypted.decode('base64'))
#: Extract links
text = text.replace("\x00", "").replace("\r", "")
links = filter(bool, text.split('\n'))
#: Log and return
self.log_debug(_("Package has %d links") % len(links))
return links
| gpl-3.0 |
uw-it-aca/canvas-analytics | data_aggregator/tests/test_cache.py | 1 | 1187 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import unittest
from django.test import TestCase
from data_aggregator.cache import DataAggregatorGCSCache
class TestDataAggregatorGCSCache(TestCase):
def test_get_cache_expiration_time(self):
cache = DataAggregatorGCSCache()
# valid urls
self.assertEqual(
cache.get_cache_expiration_time(
"canvas",
"/api/v1/courses/1392640/analytics/student_summaries.json"),
0)
self.assertEqual(
cache.get_cache_expiration_time(
"canvas",
"/api/v1/courses/1399587/analytics/users/3562797/"
"assignments.json"),
0)
# unknown service
self.assertEqual(
cache.get_cache_expiration_time(
"foobar",
"/api/v1/courses/1392640/analytics/"),
None)
# bad url
self.assertEqual(
cache.get_cache_expiration_time(
"canvas",
"/api/v2/courses/1392640/analytics/"),
None)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
benob/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/stringcomp.py | 9 | 4452 | # Natural Language Toolkit
# String Comparison Module
# Author: Tiago Tresoldi <[email protected]>
"""
String Comparison Module.
Author: Tiago Tresoldi <[email protected]>
Based on previous work by Qi Xiao Yang, Sung Sam Yuan, Li Zhao, Lu Chun,
and Sung Peng.
"""
def stringcomp (fx, fy):
"""
Return a number within C{0.0} and C{1.0} indicating the similarity between
    two strings. A perfect match is C{1.0}, no match at all is C{0.0}.
This is an implementation of the string comparison algorithm (also known
as "string similarity") published by Qi Xiao Yang, Sung Sam Yuan, Li Zhao,
Lu Chun and Sun Peng in a paper called "Faster Algorithm of String
Comparison" ( http://front.math.ucdavis.edu/0112.6022 ). Please note that,
however, this implementation presents some relevant differences that
will lead to different numerical results (read the comments for more
details).
@param fx: A C{string}.
@param fy: A C{string}.
    @return: A float with the value of the comparison between C{fx} and C{fy}.
C{1.0} indicates a perfect match, C{0.0} no match at all.
@rtype: C{float}
"""
# get the smaller of 'n' and 'm', and of 'fx' and 'fy'
n, m = len(fx), len(fy)
if m < n:
(n, m) = (m, n)
(fx, fy) = (fy, fx)
# Sum of the Square of the Number of the same Characters
ssnc = 0.
# My implementation presents some relevant differences to the pseudo-code
# presented in the paper by Yang et al., which in a number of cases will
# lead to different numerical results (and, while no empirical tests have
    # been performed, I expect this to be slower than the original).
    # The differences are due to two specific characteristics of the original
    # algorithm that I consider undesirable for my purposes:
#
    # 1. It does not take into account the probable repetition of the same
# substring inside the strings to be compared (such as "you" in "where
# do you think that you are going?") because, as far as I was able to
    #    understand, it counts only the first occurrence of each substring
# found.
# 2. It does not seem to consider the high probability of having more
# than one pattern of the same length (for example, comparing between
# "abc1def" and "abc2def" seems to consider only one three-character
# pattern, "abc").
#
# Demonstrating the differences between the two algorithms (or, at least,
# between my implementation of the original and the revised one):
#
# "abc1def" and "abc2def"
# Original: 0.534
# Current: 0.606
for length in range(n, 0, -1):
while True:
length_prev_ssnc = ssnc
for i in range(len(fx)-length+1):
pattern = fx[i:i+length]
pattern_prev_ssnc = ssnc
fx_removed = False
while True:
index = fy.find(pattern)
if index != -1:
ssnc += (2.*length)**2
if fx_removed == False:
fx = fx[:i] + fx[i+length:]
fx_removed = True
fy = fy[:index] + fy[index+length:]
else:
break
if ssnc != pattern_prev_ssnc:
break
if ssnc == length_prev_ssnc:
break
return (ssnc/((n+m)**2.))**0.5
def demo ():
print "Comparison between 'python' and 'python': %.2f" % stringcomp("python", "python")
print "Comparison between 'python' and 'Python': %.2f" % stringcomp("python", "Python")
print "Comparison between 'NLTK' and 'NTLK': %.2f" % stringcomp("NLTK", "NTLK")
print "Comparison between 'abc' and 'def': %.2f" % stringcomp("abc", "def")
print "Word most similar to 'australia' in list ['canada', 'brazil', 'egypt', 'thailand', 'austria']:"
max_score = 0.0 ; best_match = None
for country in ["canada", "brazil", "egypt", "thailand", "austria"]:
score = stringcomp("australia", country)
if score > max_score:
best_match = country
max_score = score
print "(comparison between 'australia' and '%s': %.2f)" % (country, score)
print "Word most similar to 'australia' is '%s' (score: %.2f)" % (best_match, max_score)
if __name__ == "__main__":
demo()
| gpl-3.0 |
hkawasaki/kawasaki-aio8-1 | common/djangoapps/util/views.py | 3 | 8888 | import json
import logging
import sys
from django.conf import settings
from django.core.validators import ValidationError, validate_email
from django.views.decorators.csrf import requires_csrf_token
from django.views.defaults import server_error
from django.http import (Http404, HttpResponse, HttpResponseNotAllowed,
HttpResponseServerError)
from dogapi import dog_stats_api
from edxmako.shortcuts import render_to_response
import zendesk
import calc
import track.views
log = logging.getLogger(__name__)
@requires_csrf_token
def jsonable_server_error(request, template_name='500.html'):
"""
500 error handler that serves JSON on an AJAX request, and proxies
to the Django default `server_error` view otherwise.
"""
if request.is_ajax():
msg = {"error": "The edX servers encountered an error"}
return HttpResponseServerError(json.dumps(msg))
else:
return server_error(request, template_name=template_name)
def calculate(request):
''' Calculator in footer of every page. '''
equation = request.GET['equation']
try:
result = calc.evaluator({}, {}, equation)
except:
event = {'error': map(str, sys.exc_info()),
'equation': equation}
track.views.server_track(request, 'error:calc', event, page='calc')
return HttpResponse(json.dumps({'result': 'Invalid syntax'}))
return HttpResponse(json.dumps({'result': str(result)}))
class _ZendeskApi(object):
def __init__(self):
"""
Instantiate the Zendesk API.
All of `ZENDESK_URL`, `ZENDESK_USER`, and `ZENDESK_API_KEY` must be set
in `django.conf.settings`.
"""
self._zendesk_instance = zendesk.Zendesk(
settings.ZENDESK_URL,
settings.ZENDESK_USER,
settings.ZENDESK_API_KEY,
use_api_token=False,
api_version=2,
# As of 2012-05-08, Zendesk is using a CA that is not
# installed on our servers
client_args={"disable_ssl_certificate_validation": True}
)
def create_ticket(self, ticket):
"""
Create the given `ticket` in Zendesk.
The ticket should have the format specified by the zendesk package.
"""
ticket_url = self._zendesk_instance.create_ticket(data=ticket)
return zendesk.get_id_from_url(ticket_url)
def update_ticket(self, ticket_id, update):
"""
Update the Zendesk ticket with id `ticket_id` using the given `update`.
The update should have the format specified by the zendesk package.
"""
self._zendesk_instance.update_ticket(ticket_id=ticket_id, data=update)
def _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info):
"""
Create a new user-requested Zendesk ticket.
Once created, the ticket will be updated with a private comment containing
additional information from the browser and server, such as HTTP headers
and user state. Returns a boolean value indicating whether ticket creation
was successful, regardless of whether the private comment update succeeded.
"""
zendesk_api = _ZendeskApi()
additional_info_string = (
"Additional information:\n\n" +
"\n".join("%s: %s" % (key, value) for (key, value) in additional_info.items() if value is not None)
)
# Tag all issues with LMS to distinguish channel in Zendesk; requested by student support team
zendesk_tags = list(tags.values()) + ["LMS"]
new_ticket = {
"ticket": {
"requester": {"name": realname, "email": email},
"subject": subject,
"comment": {"body": details},
"tags": zendesk_tags
}
}
try:
ticket_id = zendesk_api.create_ticket(new_ticket)
except zendesk.ZendeskError as err:
log.error("Error creating Zendesk ticket: %s", str(err))
return False
# Additional information is provided as a private update so the information
# is not visible to the user.
ticket_update = {"ticket": {"comment": {"public": False, "body": additional_info_string}}}
try:
zendesk_api.update_ticket(ticket_id, ticket_update)
except zendesk.ZendeskError as err:
log.error("Error updating Zendesk ticket: %s", str(err))
# The update is not strictly necessary, so do not indicate failure to the user
pass
return True
DATADOG_FEEDBACK_METRIC = "lms_feedback_submissions"
def _record_feedback_in_datadog(tags):
datadog_tags = [u"{k}:{v}".format(k=k.encode('utf-8'), v=v.encode('utf-8')) for k, v in tags.items()]
dog_stats_api.increment(DATADOG_FEEDBACK_METRIC, tags=datadog_tags)
def submit_feedback(request):
"""
Create a new user-requested ticket, currently implemented with Zendesk.
If feedback submission is not enabled, any request will raise `Http404`.
If any configuration parameter (`ZENDESK_URL`, `ZENDESK_USER`, or
`ZENDESK_API_KEY`) is missing, any request will raise an `Exception`.
The request must be a POST request specifying `subject` and `details`.
If the user is not authenticated, the request must also specify `name` and
`email`. If the user is authenticated, the `name` and `email` will be
populated from the user's information. If any required parameter is
missing, a 400 error will be returned indicating which field is missing and
providing an error message. If Zendesk ticket creation fails, 500 error
will be returned with no body; if ticket creation succeeds, an empty
successful response (200) will be returned.
"""
if not settings.FEATURES.get('ENABLE_FEEDBACK_SUBMISSION', False):
raise Http404()
if request.method != "POST":
return HttpResponseNotAllowed(["POST"])
if (
not settings.ZENDESK_URL or
not settings.ZENDESK_USER or
not settings.ZENDESK_API_KEY
):
raise Exception("Zendesk enabled but not configured")
def build_error_response(status_code, field, err_msg):
return HttpResponse(json.dumps({"field": field, "error": err_msg}), status=status_code)
additional_info = {}
required_fields = ["subject", "details"]
if not request.user.is_authenticated():
required_fields += ["name", "email"]
required_field_errs = {
"subject": "Please provide a subject.",
"details": "Please provide details.",
"name": "Please provide your name.",
"email": "Please provide a valid e-mail.",
}
for field in required_fields:
if field not in request.POST or not request.POST[field]:
return build_error_response(400, field, required_field_errs[field])
subject = request.POST["subject"]
details = request.POST["details"]
tags = dict(
[(tag, request.POST[tag]) for tag in ["issue_type", "course_id"] if tag in request.POST]
)
if request.user.is_authenticated():
realname = request.user.profile.name
email = request.user.email
additional_info["username"] = request.user.username
else:
realname = request.POST["name"]
email = request.POST["email"]
try:
validate_email(email)
except ValidationError:
return build_error_response(400, "email", required_field_errs["email"])
for header, pretty in [
("HTTP_REFERER", "Page"),
("HTTP_USER_AGENT", "Browser"),
("REMOTE_ADDR", "Client IP"),
("SERVER_NAME", "Host")
]:
additional_info[pretty] = request.META.get(header)
success = _record_feedback_in_zendesk(realname, email, subject, details, tags, additional_info)
_record_feedback_in_datadog(tags)
return HttpResponse(status=(200 if success else 500))
def info(request):
''' Info page (link from main header) '''
return render_to_response("info.html", {})
# From http://djangosnippets.org/snippets/1042/
def parse_accept_header(accept):
"""Parse the Accept header *accept*, returning a list with pairs of
(media_type, q_value), ordered by q values.
"""
result = []
for media_range in accept.split(","):
parts = media_range.split(";")
media_type = parts.pop(0)
media_params = []
q = 1.0
for part in parts:
(key, value) = part.lstrip().split("=", 1)
if key == "q":
q = float(value)
else:
media_params.append((key, value))
result.append((media_type, tuple(media_params), q))
result.sort(lambda x, y: -cmp(x[2], y[2]))
return result
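# Illustrative result of the parser above (derived from its logic, shown here
# as a sketch rather than an exhaustive specification):
#     parse_accept_header("text/html;q=0.8,application/json")
#     -> [('application/json', (), 1.0), ('text/html', (), 0.8)]
# i.e. media ranges ordered by descending q value.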
def accepts(request, media_type):
"""Return whether this request has an Accept header that matches type"""
accept = parse_accept_header(request.META.get("HTTP_ACCEPT", ""))
return media_type in [t for (t, p, q) in accept]
| agpl-3.0 |
bleib1dj/boto | boto/provider.py | 102 | 20925 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright 2010 Google Inc.
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This class encapsulates the provider-specific header differences.
"""
import os
from boto.compat import six
from datetime import datetime
import boto
from boto import config
from boto.compat import expanduser
from boto.pyami.config import Config
from boto.gs.acl import ACL
from boto.gs.acl import CannedACLStrings as CannedGSACLStrings
from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings
from boto.s3.acl import Policy
HEADER_PREFIX_KEY = 'header_prefix'
METADATA_PREFIX_KEY = 'metadata_prefix'
AWS_HEADER_PREFIX = 'x-amz-'
GOOG_HEADER_PREFIX = 'x-goog-'
ACL_HEADER_KEY = 'acl-header'
AUTH_HEADER_KEY = 'auth-header'
COPY_SOURCE_HEADER_KEY = 'copy-source-header'
COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header'
COPY_SOURCE_RANGE_HEADER_KEY = 'copy-source-range-header'
DELETE_MARKER_HEADER_KEY = 'delete-marker-header'
DATE_HEADER_KEY = 'date-header'
METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header'
RESUMABLE_UPLOAD_HEADER_KEY = 'resumable-upload-header'
SECURITY_TOKEN_HEADER_KEY = 'security-token-header'
STORAGE_CLASS_HEADER_KEY = 'storage-class'
MFA_HEADER_KEY = 'mfa-header'
SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header'
VERSION_ID_HEADER_KEY = 'version-id-header'
RESTORE_HEADER_KEY = 'restore-header'
STORAGE_COPY_ERROR = 'StorageCopyError'
STORAGE_CREATE_ERROR = 'StorageCreateError'
STORAGE_DATA_ERROR = 'StorageDataError'
STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError'
STORAGE_RESPONSE_ERROR = 'StorageResponseError'
NO_CREDENTIALS_PROVIDED = object()
class ProfileNotFoundError(ValueError):
pass
class Provider(object):
CredentialMap = {
'aws': ('aws_access_key_id', 'aws_secret_access_key',
'aws_security_token', 'aws_profile'),
'google': ('gs_access_key_id', 'gs_secret_access_key',
None, None),
}
AclClassMap = {
'aws': Policy,
'google': ACL
}
CannedAclsMap = {
'aws': CannedS3ACLStrings,
'google': CannedGSACLStrings
}
HostKeyMap = {
'aws': 's3',
'google': 'gs'
}
ChunkedTransferSupport = {
'aws': False,
'google': True
}
MetadataServiceSupport = {
'aws': True,
'google': False
}
# If you update this map please make sure to put "None" for the
# right-hand-side for any headers that don't apply to a provider, rather
# than simply leaving that header out (which would cause KeyErrors).
HeaderInfoMap = {
'aws': {
HEADER_PREFIX_KEY: AWS_HEADER_PREFIX,
METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-',
ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl',
AUTH_HEADER_KEY: 'AWS',
COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source',
COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX +
'copy-source-version-id',
COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX +
'copy-source-range',
DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date',
DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker',
METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX +
'metadata-directive',
RESUMABLE_UPLOAD_HEADER_KEY: None,
SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token',
SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX +
'server-side-encryption',
VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id',
STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class',
MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa',
RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore',
},
'google': {
HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX,
METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-',
ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl',
AUTH_HEADER_KEY: 'GOOG1',
COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source',
COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX +
'copy-source-version-id',
COPY_SOURCE_RANGE_HEADER_KEY: None,
DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date',
DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker',
METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX +
'metadata-directive',
RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable',
SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token',
SERVER_SIDE_ENCRYPTION_KEY: None,
# Note that this version header is not to be confused with
# the Google Cloud Storage 'x-goog-api-version' header.
VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id',
STORAGE_CLASS_HEADER_KEY: None,
MFA_HEADER_KEY: None,
RESTORE_HEADER_KEY: None,
}
}
ErrorMap = {
'aws': {
STORAGE_COPY_ERROR: boto.exception.S3CopyError,
STORAGE_CREATE_ERROR: boto.exception.S3CreateError,
STORAGE_DATA_ERROR: boto.exception.S3DataError,
STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError,
STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError,
},
'google': {
STORAGE_COPY_ERROR: boto.exception.GSCopyError,
STORAGE_CREATE_ERROR: boto.exception.GSCreateError,
STORAGE_DATA_ERROR: boto.exception.GSDataError,
STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError,
STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError,
}
}
def __init__(self, name, access_key=None, secret_key=None,
security_token=None, profile_name=None):
self.host = None
self.port = None
self.host_header = None
self.access_key = access_key
self.secret_key = secret_key
self.security_token = security_token
self.profile_name = profile_name
self.name = name
self.acl_class = self.AclClassMap[self.name]
self.canned_acls = self.CannedAclsMap[self.name]
self._credential_expiry_time = None
# Load shared credentials file if it exists
shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
self.shared_credentials = Config(do_load=False)
if os.path.isfile(shared_path):
self.shared_credentials.load_from_path(shared_path)
self.get_credentials(access_key, secret_key, security_token, profile_name)
self.configure_headers()
self.configure_errors()
# Allow config file to override default host and port.
host_opt_name = '%s_host' % self.HostKeyMap[self.name]
if config.has_option('Credentials', host_opt_name):
self.host = config.get('Credentials', host_opt_name)
port_opt_name = '%s_port' % self.HostKeyMap[self.name]
if config.has_option('Credentials', port_opt_name):
self.port = config.getint('Credentials', port_opt_name)
host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
if config.has_option('Credentials', host_header_opt_name):
self.host_header = config.get('Credentials', host_header_opt_name)
def get_access_key(self):
if self._credentials_need_refresh():
self._populate_keys_from_metadata_server()
return self._access_key
def set_access_key(self, value):
self._access_key = value
access_key = property(get_access_key, set_access_key)
def get_secret_key(self):
if self._credentials_need_refresh():
self._populate_keys_from_metadata_server()
return self._secret_key
def set_secret_key(self, value):
self._secret_key = value
secret_key = property(get_secret_key, set_secret_key)
def get_security_token(self):
if self._credentials_need_refresh():
self._populate_keys_from_metadata_server()
return self._security_token
def set_security_token(self, value):
self._security_token = value
security_token = property(get_security_token, set_security_token)
def _credentials_need_refresh(self):
if self._credential_expiry_time is None:
return False
else:
# The credentials should be refreshed if they're going to expire
# in less than 5 minutes.
delta = self._credential_expiry_time - datetime.utcnow()
# python2.6 does not have timedelta.total_seconds() so we have
# to calculate this ourselves. This is straight from the
# datetime docs.
seconds_left = (
(delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
* 10 ** 6) / 10 ** 6)
if seconds_left < (5 * 60):
boto.log.debug("Credentials need to be refreshed.")
return True
else:
return False
def get_credentials(self, access_key=None, secret_key=None,
security_token=None, profile_name=None):
access_key_name, secret_key_name, security_token_name, \
profile_name_name = self.CredentialMap[self.name]
# Load profile from shared environment variable if it was not
# already passed in and the environment variable exists
if profile_name is None and profile_name_name is not None and \
profile_name_name.upper() in os.environ:
profile_name = os.environ[profile_name_name.upper()]
shared = self.shared_credentials
if access_key is not None:
self.access_key = access_key
boto.log.debug("Using access key provided by client.")
elif access_key_name.upper() in os.environ:
self.access_key = os.environ[access_key_name.upper()]
boto.log.debug("Using access key found in environment variable.")
elif profile_name is not None:
if shared.has_option(profile_name, access_key_name):
self.access_key = shared.get(profile_name, access_key_name)
boto.log.debug("Using access key found in shared credential "
"file for profile %s." % profile_name)
elif config.has_option("profile %s" % profile_name,
access_key_name):
self.access_key = config.get("profile %s" % profile_name,
access_key_name)
boto.log.debug("Using access key found in config file: "
"profile %s." % profile_name)
else:
raise ProfileNotFoundError('Profile "%s" not found!' %
profile_name)
elif shared.has_option('default', access_key_name):
self.access_key = shared.get('default', access_key_name)
boto.log.debug("Using access key found in shared credential file.")
elif config.has_option('Credentials', access_key_name):
self.access_key = config.get('Credentials', access_key_name)
boto.log.debug("Using access key found in config file.")
if secret_key is not None:
self.secret_key = secret_key
boto.log.debug("Using secret key provided by client.")
elif secret_key_name.upper() in os.environ:
self.secret_key = os.environ[secret_key_name.upper()]
boto.log.debug("Using secret key found in environment variable.")
elif profile_name is not None:
if shared.has_option(profile_name, secret_key_name):
self.secret_key = shared.get(profile_name, secret_key_name)
boto.log.debug("Using secret key found in shared credential "
"file for profile %s." % profile_name)
elif config.has_option("profile %s" % profile_name, secret_key_name):
self.secret_key = config.get("profile %s" % profile_name,
secret_key_name)
boto.log.debug("Using secret key found in config file: "
"profile %s." % profile_name)
else:
raise ProfileNotFoundError('Profile "%s" not found!' %
profile_name)
elif shared.has_option('default', secret_key_name):
self.secret_key = shared.get('default', secret_key_name)
boto.log.debug("Using secret key found in shared credential file.")
elif config.has_option('Credentials', secret_key_name):
self.secret_key = config.get('Credentials', secret_key_name)
boto.log.debug("Using secret key found in config file.")
elif config.has_option('Credentials', 'keyring'):
keyring_name = config.get('Credentials', 'keyring')
try:
import keyring
except ImportError:
boto.log.error("The keyring module could not be imported. "
"For keyring support, install the keyring "
"module.")
raise
self.secret_key = keyring.get_password(
keyring_name, self.access_key)
boto.log.debug("Using secret key found in keyring.")
if security_token is not None:
self.security_token = security_token
boto.log.debug("Using security token provided by client.")
elif ((security_token_name is not None) and
(access_key is None) and (secret_key is None)):
# Only provide a token from the environment/config if the
# caller did not specify a key and secret. Otherwise an
# environment/config token could be paired with a
# different set of credentials provided by the caller
if security_token_name.upper() in os.environ:
self.security_token = os.environ[security_token_name.upper()]
boto.log.debug("Using security token found in environment"
" variable.")
elif shared.has_option(profile_name or 'default',
security_token_name):
self.security_token = shared.get(profile_name or 'default',
security_token_name)
boto.log.debug("Using security token found in shared "
"credential file.")
elif profile_name is not None:
if config.has_option("profile %s" % profile_name,
security_token_name):
boto.log.debug("config has option")
self.security_token = config.get("profile %s" % profile_name,
security_token_name)
boto.log.debug("Using security token found in config file: "
"profile %s." % profile_name)
elif config.has_option('Credentials', security_token_name):
self.security_token = config.get('Credentials',
security_token_name)
boto.log.debug("Using security token found in config file.")
if ((self._access_key is None or self._secret_key is None) and
self.MetadataServiceSupport[self.name]):
self._populate_keys_from_metadata_server()
self._secret_key = self._convert_key_to_str(self._secret_key)
def _populate_keys_from_metadata_server(self):
# get_instance_metadata is imported here because of a circular
# dependency.
boto.log.debug("Retrieving credentials from metadata server.")
from boto.utils import get_instance_metadata
timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
# The num_retries arg is actually the total number of attempts made,
# so the config options is named *_num_attempts to make this more
# clear to users.
metadata = get_instance_metadata(
timeout=timeout, num_retries=attempts,
data='meta-data/iam/security-credentials/')
if metadata:
# I'm assuming there's only one role on the instance profile.
security = list(metadata.values())[0]
self._access_key = security['AccessKeyId']
self._secret_key = self._convert_key_to_str(security['SecretAccessKey'])
self._security_token = security['Token']
expires_at = security['Expiration']
self._credential_expiry_time = datetime.strptime(
expires_at, "%Y-%m-%dT%H:%M:%SZ")
boto.log.debug("Retrieved credentials will expire in %s at: %s",
self._credential_expiry_time - datetime.now(), expires_at)
def _convert_key_to_str(self, key):
if isinstance(key, six.text_type):
# the secret key must be bytes and not unicode to work
# properly with hmac.new (see http://bugs.python.org/issue5285)
return str(key)
return key
def configure_headers(self):
header_info_map = self.HeaderInfoMap[self.name]
self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]
self.header_prefix = header_info_map[HEADER_PREFIX_KEY]
self.acl_header = header_info_map[ACL_HEADER_KEY]
self.auth_header = header_info_map[AUTH_HEADER_KEY]
self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
self.copy_source_version_id = header_info_map[
COPY_SOURCE_VERSION_ID_HEADER_KEY]
self.copy_source_range_header = header_info_map[
COPY_SOURCE_RANGE_HEADER_KEY]
self.date_header = header_info_map[DATE_HEADER_KEY]
self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
self.metadata_directive_header = (
header_info_map[METADATA_DIRECTIVE_HEADER_KEY])
self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY]
self.resumable_upload_header = (
header_info_map[RESUMABLE_UPLOAD_HEADER_KEY])
self.server_side_encryption_header = header_info_map[SERVER_SIDE_ENCRYPTION_KEY]
self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY]
self.version_id = header_info_map[VERSION_ID_HEADER_KEY]
self.mfa_header = header_info_map[MFA_HEADER_KEY]
self.restore_header = header_info_map[RESTORE_HEADER_KEY]
def configure_errors(self):
error_map = self.ErrorMap[self.name]
self.storage_copy_error = error_map[STORAGE_COPY_ERROR]
self.storage_create_error = error_map[STORAGE_CREATE_ERROR]
self.storage_data_error = error_map[STORAGE_DATA_ERROR]
self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR]
self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR]
def get_provider_name(self):
return self.HostKeyMap[self.name]
def supports_chunked_transfer(self):
return self.ChunkedTransferSupport[self.name]
# Static utility method for getting default Provider.
def get_default():
return Provider('aws')
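# A minimal usage sketch (the key values are placeholders, and the expected
# results below simply restate the class-level maps defined above):
#
#     provider = Provider('aws', access_key='AKIA...', secret_key='...')
#     provider.get_provider_name()            # -> 's3'
#     provider.acl_header                     # -> 'x-amz-acl'
#     provider.supports_chunked_transfer()    # -> False for 'aws', True for 'google'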
| mit |
oliciv/youtube-dl | youtube_dl/extractor/soundcloud.py | 17 | 17239 | # encoding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
int_or_none,
unified_strdate,
)
class SoundcloudIE(InfoExtractor):
"""Information extractor for soundcloud.com
To access the media, the uid of the song and a stream token
must be extracted from the page source and the script must make
a request to media.soundcloud.com/crossdomain.xml. Then
       the media can be grabbed by requesting a URL composed
of the stream token and uid
"""
_VALID_URL = r'''(?x)^(?:https?://)?
(?:(?:(?:www\.|m\.)?soundcloud\.com/
(?P<uploader>[\w\d-]+)/
(?!(?:tracks|sets(?:/[^/?#]+)?|reposts|likes|spotlight)/?(?:$|[?#]))
(?P<title>[\w\d-]+)/?
(?P<token>[^?]+?)?(?:[?].*)?$)
|(?:api\.soundcloud\.com/tracks/(?P<track_id>\d+)
(?:/?\?secret_token=(?P<secret_token>[^&]+))?)
|(?P<player>(?:w|player|p.)\.soundcloud\.com/player/?.*?url=.*)
)
'''
IE_NAME = 'soundcloud'
_TESTS = [
{
'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
'info_dict': {
'id': '62986583',
'ext': 'mp3',
'upload_date': '20121011',
'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
'uploader': 'E.T. ExTerrestrial Music',
'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
'duration': 143,
}
},
# not streamable song
{
'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '47127627',
'ext': 'mp3',
'title': 'Goldrushed',
'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
'uploader': 'The Royal Concept',
'upload_date': '20120521',
'duration': 227,
},
'params': {
# rtmp
'skip_download': True,
},
},
# private link
{
'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'uploader': 'jaimeMF',
'description': 'test chars: \"\'/\\ä↭',
'upload_date': '20131209',
'duration': 9,
},
},
# private link (alt format)
{
'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
'info_dict': {
'id': '123998367',
'ext': 'mp3',
'title': 'Youtube - Dl Test Video \'\' Ä↭',
'uploader': 'jaimeMF',
'description': 'test chars: \"\'/\\ä↭',
'upload_date': '20131209',
'duration': 9,
},
},
# downloadable song
{
'url': 'https://soundcloud.com/oddsamples/bus-brakes',
'md5': '7624f2351f8a3b2e7cd51522496e7631',
'info_dict': {
'id': '128590877',
'ext': 'mp3',
'title': 'Bus Brakes',
'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
'uploader': 'oddsamples',
'upload_date': '20140109',
'duration': 17,
},
},
]
_CLIENT_ID = '02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea'
_IPHONE_CLIENT_ID = '376f225bf427445fc4bfb6b99b72e0bf'
def report_resolve(self, video_id):
"""Report information extraction."""
self.to_screen('%s: Resolving id' % video_id)
@classmethod
def _resolv_url(cls, url):
return 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=' + cls._CLIENT_ID
def _extract_info_dict(self, info, full_title=None, quiet=False, secret_token=None):
track_id = compat_str(info['id'])
name = full_title or track_id
if quiet:
self.report_extraction(name)
thumbnail = info['artwork_url']
if thumbnail is not None:
thumbnail = thumbnail.replace('-large', '-t500x500')
ext = 'mp3'
result = {
'id': track_id,
'uploader': info['user']['username'],
'upload_date': unified_strdate(info['created_at']),
'title': info['title'],
'description': info['description'],
'thumbnail': thumbnail,
'duration': int_or_none(info.get('duration'), 1000),
'webpage_url': info.get('permalink_url'),
}
formats = []
if info.get('downloadable', False):
# We can build a direct link to the song
format_url = (
'https://api.soundcloud.com/tracks/{0}/download?client_id={1}'.format(
track_id, self._CLIENT_ID))
formats.append({
'format_id': 'download',
'ext': info.get('original_format', 'mp3'),
'url': format_url,
'vcodec': 'none',
'preference': 10,
})
# We have to retrieve the url
streams_url = ('http://api.soundcloud.com/i1/tracks/{0}/streams?'
'client_id={1}&secret_token={2}'.format(track_id, self._IPHONE_CLIENT_ID, secret_token))
format_dict = self._download_json(
streams_url,
track_id, 'Downloading track url')
for key, stream_url in format_dict.items():
if key.startswith('http'):
formats.append({
'format_id': key,
'ext': ext,
'url': stream_url,
'vcodec': 'none',
})
elif key.startswith('rtmp'):
# The url doesn't have an rtmp app, we have to extract the playpath
url, path = stream_url.split('mp3:', 1)
formats.append({
'format_id': key,
'url': url,
'play_path': 'mp3:' + path,
'ext': 'flv',
'vcodec': 'none',
})
if not formats:
# We fallback to the stream_url in the original info, this
# cannot be always used, sometimes it can give an HTTP 404 error
formats.append({
'format_id': 'fallback',
'url': info['stream_url'] + '?client_id=' + self._CLIENT_ID,
'ext': ext,
'vcodec': 'none',
})
for f in formats:
if f['format_id'].startswith('http'):
f['protocol'] = 'http'
if f['format_id'].startswith('rtmp'):
f['protocol'] = 'rtmp'
self._check_formats(formats, track_id)
self._sort_formats(formats)
result['formats'] = formats
return result
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
track_id = mobj.group('track_id')
token = None
if track_id is not None:
info_json_url = 'http://api.soundcloud.com/tracks/' + track_id + '.json?client_id=' + self._CLIENT_ID
full_title = track_id
token = mobj.group('secret_token')
if token:
info_json_url += "&secret_token=" + token
elif mobj.group('player'):
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
real_url = query['url'][0]
# If the token is in the query of the original url we have to
# manually add it
if 'secret_token' in query:
real_url += '?secret_token=' + query['secret_token'][0]
return self.url_result(real_url)
else:
# extract uploader (which is in the url)
uploader = mobj.group('uploader')
# extract simple title (uploader + slug of song title)
slug_title = mobj.group('title')
token = mobj.group('token')
full_title = resolve_title = '%s/%s' % (uploader, slug_title)
if token:
resolve_title += '/%s' % token
self.report_resolve(full_title)
url = 'http://soundcloud.com/%s' % resolve_title
info_json_url = self._resolv_url(url)
info = self._download_json(info_json_url, full_title, 'Downloading info JSON')
return self._extract_info_dict(info, full_title, secret_token=token)
class SoundcloudSetIE(SoundcloudIE):
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
IE_NAME = 'soundcloud:set'
_TESTS = [{
'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
'info_dict': {
'id': '2284613',
'title': 'The Royal Concept EP',
},
'playlist_mincount': 6,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
# extract uploader (which is in the url)
uploader = mobj.group('uploader')
# extract simple title (uploader + slug of song title)
slug_title = mobj.group('slug_title')
full_title = '%s/sets/%s' % (uploader, slug_title)
url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
token = mobj.group('token')
if token:
full_title += '/' + token
url += '/' + token
self.report_resolve(full_title)
resolv_url = self._resolv_url(url)
info = self._download_json(resolv_url, full_title)
if 'errors' in info:
msgs = (compat_str(err['error_message']) for err in info['errors'])
raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))
entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in info['tracks']]
return {
'_type': 'playlist',
'entries': entries,
'id': '%s' % info['id'],
'title': info['title'],
}
class SoundcloudUserIE(SoundcloudIE):
_VALID_URL = r'''(?x)
https?://
(?:(?:www|m)\.)?soundcloud\.com/
(?P<user>[^/]+)
(?:/
(?P<rsrc>tracks|sets|reposts|likes|spotlight)
)?
/?(?:[?#].*)?$
'''
IE_NAME = 'soundcloud:user'
_TESTS = [{
'url': 'https://soundcloud.com/the-akashic-chronicler',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (All)',
},
'playlist_mincount': 111,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/tracks',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Tracks)',
},
'playlist_mincount': 50,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/sets',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Playlists)',
},
'playlist_mincount': 3,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/reposts',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Reposts)',
},
'playlist_mincount': 7,
}, {
'url': 'https://soundcloud.com/the-akashic-chronicler/likes',
'info_dict': {
'id': '114582580',
'title': 'The Akashic Chronicler (Likes)',
},
'playlist_mincount': 321,
}, {
'url': 'https://soundcloud.com/grynpyret/spotlight',
'info_dict': {
'id': '7098329',
'title': 'Grynpyret (Spotlight)',
},
'playlist_mincount': 1,
}]
_API_BASE = 'https://api.soundcloud.com'
_API_V2_BASE = 'https://api-v2.soundcloud.com'
_BASE_URL_MAP = {
'all': '%s/profile/soundcloud:users:%%s' % _API_V2_BASE,
'tracks': '%s/users/%%s/tracks' % _API_BASE,
'sets': '%s/users/%%s/playlists' % _API_V2_BASE,
'reposts': '%s/profile/soundcloud:users:%%s/reposts' % _API_V2_BASE,
'likes': '%s/users/%%s/likes' % _API_V2_BASE,
'spotlight': '%s/users/%%s/spotlight' % _API_V2_BASE,
}
_TITLE_MAP = {
'all': 'All',
'tracks': 'Tracks',
'sets': 'Playlists',
'reposts': 'Reposts',
'likes': 'Likes',
'spotlight': 'Spotlight',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader = mobj.group('user')
url = 'http://soundcloud.com/%s/' % uploader
resolv_url = self._resolv_url(url)
user = self._download_json(
resolv_url, uploader, 'Downloading user info')
resource = mobj.group('rsrc') or 'all'
base_url = self._BASE_URL_MAP[resource] % user['id']
next_href = None
entries = []
for i in itertools.count():
if not next_href:
data = compat_urllib_parse.urlencode({
'offset': i * 50,
'limit': 50,
'client_id': self._CLIENT_ID,
'linked_partitioning': '1',
'representation': 'speedy',
})
next_href = base_url + '?' + data
response = self._download_json(
next_href, uploader, 'Downloading track page %s' % (i + 1))
collection = response['collection']
if not collection:
self.to_screen('%s: End page received' % uploader)
break
def resolve_permalink_url(candidates):
for cand in candidates:
if isinstance(cand, dict):
permalink_url = cand.get('permalink_url')
if permalink_url and permalink_url.startswith('http'):
return permalink_url
for e in collection:
permalink_url = resolve_permalink_url((e, e.get('track'), e.get('playlist')))
if permalink_url:
entries.append(self.url_result(permalink_url))
if 'next_href' in response:
next_href = response['next_href']
if not next_href:
break
else:
next_href = None
return {
'_type': 'playlist',
'id': compat_str(user['id']),
'title': '%s (%s)' % (user['username'], self._TITLE_MAP[resource]),
'entries': entries,
}
class SoundcloudPlaylistIE(SoundcloudIE):
_VALID_URL = r'https?://api\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
IE_NAME = 'soundcloud:playlist'
_TESTS = [{
'url': 'http://api.soundcloud.com/playlists/4110309',
'info_dict': {
'id': '4110309',
'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
'description': 're:.*?TILT Brass - Bowery Poetry Club',
},
'playlist_count': 6,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
base_url = '%s//api.soundcloud.com/playlists/%s.json?' % (self.http_scheme(), playlist_id)
data_dict = {
'client_id': self._CLIENT_ID,
}
token = mobj.group('token')
if token:
data_dict['secret_token'] = token
data = compat_urllib_parse.urlencode(data_dict)
data = self._download_json(
base_url + data, playlist_id, 'Downloading playlist')
entries = [self.url_result(track['permalink_url'], 'Soundcloud') for track in data['tracks']]
return {
'_type': 'playlist',
'id': playlist_id,
'title': data.get('title'),
'description': data.get('description'),
'entries': entries,
}
| unlicense |
junmin-zhu/chromium-rivertrail | chrome/test/functional/stress.py | 3 | 29618 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Stress Tests for Google Chrome.
This script runs 4 different stress tests:
1. Plugin stress.
2. Back and forward stress.
3. Download stress.
4. Preference stress.
After every cycle (running all 4 stress tests) it checks for crashes.
If there are any crashes, the script generates a report, uploads it to
a server, and sends an email about the crash with a link to the report on the server.
Apart from this, whenever the test stops on Mac it looks for and reports
zombies.
Prerequisites:
Test needs the following files/folders in the Data dir.
1. A crash_report tool in "pyauto_private/stress/mac" folder for use on Mac.
2. A "downloads" folder containing stress_downloads and all the files
referenced in it.
3. A pref_dict file in "pyauto_private/stress/mac" folder.
4. A "plugin" folder containing doubleAnimation.xaml, flash.swf, FlashSpin.swf,
generic.html, get_flash_player.gif, js-invoker.swf, mediaplayer.wmv,
NavigatorTicker11.class, Plugins_page.html, sample5.mov, silverlight.xaml,
silverlight.js, embed.pdf, plugins_page.html and test6.swf.
5. A stress_pref file in "pyauto_private/stress".
"""
import commands
import glob
import logging
import os
import random
import re
import shutil
import sys
import time
import urllib
import test_utils
import subprocess
import pyauto_functional
import pyauto
import pyauto_utils
CRASHES = 'crashes' # Name of the folder to store crashes
class StressTest(pyauto.PyUITest):
"""Run all the stress tests."""
flash_url1 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'flash.swf'))
flash_url2 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(),'plugin', 'js-invoker.swf'))
flash_url3 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'generic.html'))
plugin_url = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'plugin', 'plugins_page.html'))
empty_url = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'empty.html'))
download_url1 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(), 'downloads', 'a_zip_file.zip'))
download_url2 = pyauto.PyUITest.GetFileURLForPath(
os.path.join(pyauto.PyUITest.DataDir(),'zip', 'test.zip'))
file_list = pyauto.PyUITest.EvalDataFrom(
os.path.join(pyauto.PyUITest.DataDir(), 'downloads', 'stress_downloads'))
symbols_dir = os.path.join(os.getcwd(), 'Build_Symbols')
stress_pref = pyauto.PyUITest.EvalDataFrom(
os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private', 'stress',
'stress_pref'))
breakpad_dir = None
chrome_version = None
bookmarks_list = []
def _FuncDir(self):
"""Returns the path to the functional dir chrome/test/functional."""
return os.path.dirname(__file__)
def _DownloadSymbols(self):
"""Downloads the symbols for the build being tested."""
download_location = os.path.join(os.getcwd(), 'Build_Symbols')
if os.path.exists(download_location):
shutil.rmtree(download_location)
os.makedirs(download_location)
url = self.stress_pref['symbols_dir'] + self.chrome_version
# TODO: Add linux symbol_files
if self.IsWin():
url = url + '/win/'
symbol_files = ['chrome_dll.pdb', 'chrome_exe.pdb']
elif self.IsMac():
url = url + '/mac/'
symbol_files = map(urllib.quote,
['Google Chrome Framework.framework',
'Google Chrome Helper.app',
'Google Chrome.app',
'crash_inspector',
'crash_report_sender',
'ffmpegsumo.so',
'libplugin_carbon_interpose.dylib'])
index = 0
symbol_files = ['%s-%s-i386.breakpad' % (sym_file, self.chrome_version) \
for sym_file in symbol_files]
logging.info(symbol_files)
for sym_file in symbol_files:
sym_url = url + sym_file
logging.info(sym_url)
download_sym_file = os.path.join(download_location, sym_file)
logging.info(download_sym_file)
urllib.urlretrieve(sym_url, download_sym_file)
def setUp(self):
pyauto.PyUITest.setUp(self)
self.breakpad_dir = self._CrashDumpFolder()
self.chrome_version = self.GetBrowserInfo()['properties']['ChromeVersion']
# Plugin stress functions
def _CheckForPluginProcess(self, plugin_name):
"""Checks if a particular plugin process exists.
Args:
plugin_name : plugin process which should be running.
"""
process = self.GetBrowserInfo()['child_processes']
self.assertTrue([x for x in process
if x['type'] == 'Plug-in' and
x['name'] == plugin_name])
def _GetPluginProcessId(self, plugin_name):
"""Get Plugin process id.
Args:
plugin_name: Plugin whose pid is expected.
Eg: "Shockwave Flash"
Returns:
Process id if the plugin process is running.
None otherwise.
"""
for process in self.GetBrowserInfo()['child_processes']:
if process['type'] == 'Plug-in' and \
re.search(plugin_name, process['name']):
return process['pid']
return None
def _CloseAllTabs(self):
"""Close all but one tab in first window."""
tab_count = self.GetTabCount(0)
for tab_index in xrange(tab_count - 1, 0, -1):
self.CloseTab(tab_index)
def _CloseAllWindows(self):
"""Close all windows except one."""
win_count = self.GetBrowserWindowCount()
for windex in xrange(win_count - 1, 0, -1):
self.RunCommand(pyauto.IDC_CLOSE_WINDOW, windex)
def _ReloadAllTabs(self):
"""Reload all the tabs in first window."""
for tab_index in range(self.GetTabCount()):
self.ReloadTab(tab_index)
def _LoadFlashInMultipleTabs(self):
"""Load Flash in multiple tabs in first window."""
self.NavigateToURL(self.empty_url)
# Open 18 tabs with flash
for _ in range(9):
self.AppendTab(pyauto.GURL(self.flash_url1))
self.AppendTab(pyauto.GURL(self.flash_url2))
def _OpenAndCloseMultipleTabsWithFlash(self):
"""Stress test for flash in multiple tabs."""
    logging.info("In _OpenAndCloseMultipleTabsWithFlash.")
self._LoadFlashInMultipleTabs()
self._CheckForPluginProcess('Shockwave Flash')
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithFlash(self):
"""Stress test for flash in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithFlash.')
# Open 5 Normal and 4 Incognito windows
for tab_index in range(1, 10):
if tab_index < 6:
self.OpenNewBrowserWindow(True)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(self.flash_url2, tab_index, 0)
self.AppendTab(pyauto.GURL(self.flash_url2), tab_index)
self._CloseAllWindows()
def _OpenAndCloseMultipleTabsWithMultiplePlugins(self):
"""Stress test using multiple plugins in multiple tabs."""
logging.info('In _OpenAndCloseMultipleTabsWithMultiplePlugins.')
    # Append 5 tabs with URL
for _ in range(5):
self.AppendTab(pyauto.GURL(self.plugin_url))
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithMultiplePlugins(self):
"""Stress test using multiple plugins in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithMultiplePlugins.')
# Open 4 windows with URL
for tab_index in range(1, 5):
if tab_index < 6:
self.OpenNewBrowserWindow(True)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(self.plugin_url, tab_index, 0)
self._CloseAllWindows()
def _KillAndReloadFlash(self):
"""Stress test by killing flash process and reloading tabs."""
self._LoadFlashInMultipleTabs()
flash_process_id1 = self._GetPluginProcessId('Shockwave Flash')
self.Kill(flash_process_id1)
self._ReloadAllTabs()
self._CloseAllTabs()
def _KillAndReloadRenderersWithFlash(self):
"""Stress test by killing renderer processes and reloading tabs."""
logging.info('In _KillAndReloadRenderersWithFlash')
self._LoadFlashInMultipleTabs()
info = self.GetBrowserInfo()
# Kill all renderer processes
for tab_index in range(self.GetTabCount(0)):
self.KillRendererProcess(
info['windows'][0]['tabs'][tab_index]['renderer_pid'])
self._ReloadAllTabs()
self._CloseAllTabs()
def _TogglePlugin(self, plugin_name):
"""Toggle plugin status.
Args:
plugin_name: Name of the plugin to toggle.
"""
plugins = self.GetPluginsInfo().Plugins()
for item in range(len(plugins)):
if re.search(plugin_name, plugins[item]['name']):
if plugins[item]['enabled']:
self.DisablePlugin(plugins[item]['path'])
else:
self.EnablePlugin(plugins[item]['path'])
def _ToggleAndReloadFlashPlugin(self):
"""Toggle flash and reload all tabs."""
logging.info('In _ToggleAndReloadFlashPlugin')
for _ in range(10):
self.AppendTab(pyauto.GURL(self.flash_url3))
# Disable Flash Plugin
self._TogglePlugin('Shockwave Flash')
self._ReloadAllTabs()
# Enable Flash Plugin
self._TogglePlugin('Shockwave Flash')
self._ReloadAllTabs()
self._CloseAllTabs()
# Downloads stress functions
def _LoadDownloadsInMultipleTabs(self):
"""Load Downloads in multiple tabs in the same window."""
# Open 15 tabs with downloads
logging.info('In _LoadDownloadsInMultipleTabs')
for tab_index in range(15):
      # We open an empty tab and then download a file from it.
self.AppendTab(pyauto.GURL(self.empty_url))
self.NavigateToURL(self.download_url1, 0, tab_index + 1)
self.AppendTab(pyauto.GURL(self.empty_url))
self.NavigateToURL(self.download_url2, 0, tab_index + 2)
def _OpenAndCloseMultipleTabsWithDownloads(self):
"""Download items in multiple tabs."""
logging.info('In _OpenAndCloseMultipleTabsWithDownloads')
self._LoadDownloadsInMultipleTabs()
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithDownloads(self):
"""Randomly have downloads in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithDownloads')
# Open 15 Windows randomly on both regular and incognito with downloads
for window_index in range(15):
tick = round(random.random() * 100)
if tick % 2 != 0:
self.NavigateToURL(self.download_url2, 0, 0)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.AppendTab(pyauto.GURL(self.empty_url), 1)
self.NavigateToURL(self.download_url2, 1, 1)
self._CloseAllWindows()
def _OpenAndCloseMultipleTabsWithMultipleDownloads(self):
"""Download multiple items in multiple tabs."""
logging.info('In _OpenAndCloseMultipleTabsWithMultipleDownloads')
self.NavigateToURL(self.empty_url)
for _ in range(15):
for file in self.file_list:
count = 1
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'downloads', file))
self.AppendTab(pyauto.GURL(self.empty_url))
self.NavigateToURL(url, 0, count)
count = count + 1
self._CloseAllTabs()
def _OpenAndCloseMultipleWindowsWithMultipleDownloads(self):
"""Randomly multiple downloads in multiple windows."""
logging.info('In _OpenAndCloseMultipleWindowsWithMultipleDownloads')
for _ in range(15):
for file in self.file_list:
tick = round(random.random() * 100)
url = self.GetFileURLForPath(
os.path.join(self.DataDir(), 'downloads', file))
        if tick % 2 != 0:
self.NavigateToURL(url, 0, 0)
else:
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.AppendTab(pyauto.GURL(self.empty_url), 1)
self.NavigateToURL(url, 1, 1)
self._CloseAllWindows()
# Back and Forward stress functions
def _BrowserGoBack(self, window_index):
"""Go back in the browser history.
    Chrome has a limitation on history navigation and can only go back 49 pages.
Args:
window_index: the index of the browser window to work on.
"""
for nback in range(48): # Go back 48 times.
if nback % 4 == 0: # Bookmark every 5th url when going back.
self._BookMarkEvery5thURL(window_index)
self.TabGoBack(tab_index=0, windex=window_index)
def _BrowserGoForward(self, window_index):
"""Go Forward in the browser history.
    Chrome has a limitation on history navigation and can only go forward 49 pages.
Args:
window_index: the index of the browser window to work on.
"""
    for nforward in range(48): # Go forward 48 times.
      if nforward % 4 == 0: # Bookmark every 5th url when going forward.
self._BookMarkEvery5thURL(window_index)
self.TabGoForward(tab_index=0, windex=window_index)
  def _AddToListAndBookmark(self, newname, url):
    """Bookmark the url to the bookmark bar and to the list of bookmarks.
Args:
newname: the name of the bookmark.
url: the url to bookmark.
"""
bookmarks = self.GetBookmarkModel()
bar_id = bookmarks.BookmarkBar()['id']
self.AddBookmarkURL(bar_id, 0, newname, url)
self.bookmarks_list.append(newname)
  def _RemoveFromListAndBookmarkBar(self, name):
    """Remove the bookmark from the bookmark bar and the bookmarks list.
Args:
name: the name of bookmark to remove.
"""
bookmarks = self.GetBookmarkModel()
node = bookmarks.FindByTitle(name)
self.RemoveBookmark(node[0]['id'])
self.bookmarks_list.remove(name)
def _DuplicateBookmarks(self, name):
"""Find duplicate bookmark in the bookmarks list.
Args:
name: name of the bookmark.
Returns:
True if it's a duplicate.
"""
for index in (self.bookmarks_list):
if index == name:
return True
return False
def _BookMarkEvery5thURL(self, window_index):
"""Check for duplicate in list and bookmark current url.
    If it's the first time and the list is empty, add the bookmark.
    If it's a duplicate, remove the bookmark.
    If it's the New Tab page, skip it.
Args:
window_index: the index of the browser window to work on.
"""
tab_title = self.GetActiveTabTitle(window_index) # get the page title
url = self.GetActiveTabURL(window_index).spec() # get the page url
if not self.bookmarks_list:
self._AddToListAndBookmark(tab_title, url) # first run bookmark the url
return
elif self._DuplicateBookmarks(tab_title):
self._RemoveFromListAndBookmarkBar(tab_title)
return
elif tab_title == 'New Tab': # new tab page pass over
return
else:
# new bookmark add it to bookmarkbar
self._AddToListAndBookmark(tab_title, url)
return
def _ReadFileAndLoadInNormalAndIncognito(self):
"""Read urls and load them in normal and incognito window.
We load 96 urls only as we can go back and forth 48 times.
Uses time to get different urls in normal and incognito window
The source file is taken from stress folder in /data folder.
"""
# URL source from stress folder in data folder
data_file = os.path.join(self.DataDir(), 'pyauto_private', 'stress',
'urls_and_titles')
url_data = self.EvalDataFrom(data_file)
urls = url_data.keys()
i = 0
ticks = int(time.time()) # get the latest time.
for url in urls:
if i <= 96 : # load only 96 urls.
if ticks % 2 == 0: # loading in Incognito and Normal window.
self.NavigateToURL(url)
else:
self.NavigateToURL(url, 1, 0)
else:
break
ticks = ticks - 1
i += 1
return
def _StressTestNavigation(self):
""" This is the method from where various navigations are called.
    First we load the urls, then navigate back and forth in the
    incognito window, then in the normal window.
"""
self._ReadFileAndLoadInNormalAndIncognito() # Load the urls.
self._BrowserGoBack(1) # Navigate back in incognito window.
self._BrowserGoForward(1) # Navigate forward in incognito window
self._BrowserGoBack(0) # Navigate back in normal window
self._BrowserGoForward(0) # Navigate forward in normal window
# Preference stress functions
  def _RandomBool(self):
    """Return a random True or False value for boolean preferences."""
return random.randint(0, 1) == 1
  def _RandomURL(self):
    """Some preferences take a URL string, so generate a random URL here."""
# Site list
site_list = ['test1.html', 'test2.html','test3.html','test4.html',
'test5.html', 'test7.html', 'test6.html']
random_site = random.choice(site_list)
# Returning a url of random site
return self.GetFileURLForPath(os.path.join(self.DataDir(), random_site))
def _RandomURLArray(self):
"""Returns a list of 10 random URLs."""
return [self._RandomURL() for _ in range(10)]
  def _RandomInt(self, max_number):
    """Return a random integer value for integer preferences.
    E.g. if a preference has three options, we pick one of
    them at random.
    Args:
      max_number: The number of options that the preference has.
"""
return random.randrange(1, max_number)
def _RandomDownloadDir(self):
"""Returns a random download directory."""
return random.choice(['dl_dir1', 'dl_dir2', 'dl_dir3',
'dl_dir4', 'dl_dir5'])
  def _SetPref(self):
    """Read the preferences from a file and set them in Chrome.
"""
raw_dictionary = self.EvalDataFrom(os.path.join(self.DataDir(),
'pyauto_private', 'stress', 'pref_dict'))
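    # The pref_dict file maps preference names to the type tags handled
    # below (BOOL, STRING_URL, ARRAY_URL, STRING_PATH, INT<n>).
    # Hypothetical entries for illustration only (the real file lives in
    # pyauto_private/stress and is not shown here):
    #   {'kAlternateErrorPagesEnabled': 'BOOL',
    #    'kRestoreOnStartup': 'INT3'}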
value_dictionary = {}
for key, value in raw_dictionary.iteritems():
if value == 'BOOL':
value_dictionary[key] = self._RandomBool()
elif value == 'STRING_URL':
value_dictionary[key] = self._RandomURL()
elif value == 'ARRAY_URL':
value_dictionary[key] = self._RandomURLArray()
elif value == 'STRING_PATH':
value_dictionary[key] = self._RandomDownloadDir()
elif value[0:3] == 'INT':
        # Normally we define the INT datatype with its number of options,
        # so parse the number of options and select one of them
        # randomly.
value_dictionary[key] = 1
max_number = raw_dictionary[key][3:4]
if not max_number == 1:
          value_dictionary[key] = self._RandomInt(int(max_number))
      self.SetPrefs(getattr(pyauto, key), value_dictionary[key])
return value_dictionary
# Crash reporting functions
def _CrashDumpFolder(self):
"""Get the breakpad folder.
Returns:
The full path of the Crash Reports folder.
"""
breakpad_folder = self.GetBrowserInfo()['properties']['DIR_CRASH_DUMPS']
self.assertTrue(breakpad_folder, 'Cannot figure crash dir')
return breakpad_folder
  def _DeleteDumps(self):
    """Delete all the dump files in the Crash Reports folder."""
# should be called at the start of stress run
if os.path.exists(self.breakpad_dir):
logging.info('xxxxxxxxxxxxxxxINSIDE DELETE DUMPSxxxxxxxxxxxxxxxxx')
if self.IsMac():
shutil.rmtree(self.breakpad_dir)
elif self.IsWin():
files = os.listdir(self.breakpad_dir)
for file in files:
os.remove(file)
first_crash = os.path.join(os.getcwd(), '1stcrash')
crashes_dir = os.path.join(os.getcwd(), 'crashes')
if (os.path.exists(crashes_dir)):
shutil.rmtree(crashes_dir)
shutil.rmtree(first_crash)
def _SymbolicateCrashDmp(self, dmp_file, symbols_dir, output_file):
"""Generate symbolicated crash report.
Args:
dmp_file: the dmp file to symbolicate.
symbols_dir: the directory containing the symbols.
output_file: the output file.
Returns:
Crash report text.
"""
report = ''
if self.IsWin():
windbg_cmd = [
os.path.join('C:', 'Program Files', 'Debugging Tools for Windows',
'windbg.exe'),
'-Q',
'-y',
'\"',
symbols_dir,
'\"',
'-c',
'\".ecxr;k50;.logclose;q\"',
'-logo',
output_file,
'-z',
'\"',
dmp_file,
'\"']
subprocess.call(windbg_cmd)
# Since we are directly writing the info into output_file,
# we just need to copy that in to report
report = open(output_file, 'r').read()
elif self.IsMac():
crash_report = os.path.join(self.DataDir(), 'pyauto_private', 'stress',
'mac', 'crash_report')
for i in range(5): # crash_report doesn't work sometimes. So we retry
report = test_utils.Shell2(
'%s -S "%s" "%s"' % (crash_report, symbols_dir, dmp_file))[0]
if len(report) < 200:
          try_again = 'Try %d. crash_report didn\'t work out. Trying again' % i
logging.info(try_again)
else:
break
open(output_file, 'w').write(report)
return report
def _SaveSymbols(self, symbols_dir, dump_dir=' ', multiple_dumps=True):
"""Save the symbolicated files for all crash dumps.
Args:
symbols_dir: the directory containing the symbols.
dump_dir: Path to the directory holding the crash dump files.
multiple_dumps: True if we are processing multiple dump files,
False if we are processing only the first crash.
"""
if multiple_dumps:
dump_dir = self.breakpad_dir
if not os.path.isdir(CRASHES):
os.makedirs(CRASHES)
# This will be sent to the method by the caller.
dmp_files = glob.glob(os.path.join(dump_dir, '*.dmp'))
for dmp_file in dmp_files:
dmp_id = os.path.splitext(os.path.basename(dmp_file))[0]
if multiple_dumps:
report_folder = CRASHES
else:
report_folder = dump_dir
report_fname = os.path.join(report_folder,
'%s.txt' % (dmp_id))
report = self._SymbolicateCrashDmp(dmp_file, symbols_dir,
report_fname)
if report == '':
logging.info('Crash report is empty.')
# This is for copying the original dumps.
if multiple_dumps:
shutil.copy2(dmp_file, CRASHES)
def _GetFirstCrashDir(self):
"""Get first crash file in the crash folder.
Here we create the 1stcrash directory which holds the
first crash report, which will be attached to the mail.
"""
breakpad_folder = self.breakpad_dir
dump_list = glob.glob1(breakpad_folder,'*.dmp')
dump_list.sort(key=lambda s: os.path.getmtime(os.path.join(
breakpad_folder, s)))
first_crash_file = os.path.join(breakpad_folder, dump_list[0])
if not os.path.isdir('1stcrash'):
os.makedirs('1stcrash')
shutil.copy2(first_crash_file, '1stcrash')
first_crash_dir = os.path.join(os.getcwd(), '1stcrash')
return first_crash_dir
def _GetFirstCrashFile(self):
"""Get first crash file in the crash folder."""
first_crash_dir = os.path.join(os.getcwd(), '1stcrash')
for each in os.listdir(first_crash_dir):
if each.endswith('.txt'):
first_crash_file = each
return os.path.join(first_crash_dir, first_crash_file)
def _ProcessOnlyFirstCrash(self):
""" Process only the first crash report for email."""
first_dir = self._GetFirstCrashDir()
self._SaveSymbols(self.symbols_dir, first_dir, False)
def _GetOSName(self):
"""Returns the OS type we are running this script on."""
os_name = ''
if self.IsMac():
os_number = commands.getoutput('sw_vers -productVersion | cut -c 1-4')
if os_number == '10.6':
os_name = 'Snow_Leopard'
elif os_number == '10.5':
os_name = 'Leopard'
elif self.IsWin():
      # TODO: Windows team needs to find a way to get the OS name
os_name = 'Windows'
if platform.version()[0] == '5':
os_name = os_name + '_XP'
else:
os_name = os_name + '_Vista/Win7'
return os_name
def _ProcessUploadAndEmailCrashes(self):
"""Upload the crashes found and email the team about this."""
logging.info('#########INSIDE _ProcessUploadAndEmailCrashes#########')
try:
build_version = self.chrome_version
self._SaveSymbols(self.symbols_dir)
self._ProcessOnlyFirstCrash()
file_to_attach = self._GetFirstCrashFile()
# removing the crash_txt for now,
# since we are getting UnicodeDecodeError
# crash_txt = open(file_to_attach).read()
except ValueError:
test_utils.SendMail(self.stress_pref['mailing_address'],
self.stress_pref['mailing_address'],
"We don't have build version",
"BROWSER CRASHED, PLEASE CHECK",
self.stress_pref['smtp'])
# Move crash reports and dumps to server
os_name = self._GetOSName()
dest_dir = build_version + '_' + os_name
if (test_utils.Shell2(self.stress_pref['script'] % (CRASHES, dest_dir))):
logging.info('Copy Complete')
    upload_dir = self.stress_pref['upload_dir'] + dest_dir
num_crashes = '\n \n Number of Crashes :' + \
str(len(glob.glob1(self.breakpad_dir, '*.dmp')))
mail_content = '\n\n Crash Report URL :' + upload_dir + '\n' + \
num_crashes + '\n\n' # + crash_txt
mail_subject = 'Stress Results :' + os_name + '_' + build_version
# Sending mail with first crash report, # of crashes, location of upload
test_utils.SendMail(self.stress_pref['mailing_address'],
self.stress_pref['mailing_address'],
mail_subject, mail_content,
self.stress_pref['smtp'], file_to_attach)
def _ReportCrashIfAny(self):
"""Check for browser crashes and report."""
if os.path.isdir(self.breakpad_dir):
listOfDumps = glob.glob(os.path.join(self.breakpad_dir, '*.dmp'))
if len(listOfDumps) > 0:
logging.info('========== INSIDE REPORT CRASH++++++++++++++')
# inform a method to process the dumps
self._ProcessUploadAndEmailCrashes()
# Test functions
def _PrefStress(self):
"""Stress preferences."""
default_prefs = self.GetPrefsInfo()
pref_dictionary = self._SetPref()
for key, value in pref_dictionary.iteritems():
self.assertEqual(value, self.GetPrefsInfo().Prefs(
getattr(pyauto, key)))
for key, value in pref_dictionary.iteritems():
self.SetPrefs(getattr(pyauto, key),
default_prefs.Prefs(getattr(pyauto, key)))
def _NavigationStress(self):
"""Run back and forward stress in normal and incognito window."""
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self._StressTestNavigation()
def _DownloadStress(self):
"""Run all the Download stress test."""
org_download_dir = self.GetDownloadDirectory().value()
new_dl_dir = os.path.join(org_download_dir, 'My+Downloads Folder')
os.path.exists(new_dl_dir) and shutil.rmtree(new_dl_dir)
os.makedirs(new_dl_dir)
self.SetPrefs(pyauto.kDownloadDefaultDirectory, new_dl_dir)
self._OpenAndCloseMultipleTabsWithDownloads()
self._OpenAndCloseMultipleWindowsWithDownloads()
self._OpenAndCloseMultipleTabsWithMultipleDownloads()
self._OpenAndCloseMultipleWindowsWithMultipleDownloads()
pyauto_utils.RemovePath(new_dl_dir) # cleanup
self.SetPrefs(pyauto.kDownloadDefaultDirectory, org_download_dir)
def _PluginStress(self):
"""Run all the plugin stress tests."""
self._OpenAndCloseMultipleTabsWithFlash()
self._OpenAndCloseMultipleWindowsWithFlash()
self._OpenAndCloseMultipleTabsWithMultiplePlugins()
self._OpenAndCloseMultipleWindowsWithMultiplePlugins()
self._KillAndReloadRenderersWithFlash()
self._ToggleAndReloadFlashPlugin()
def testStress(self):
"""Run all the stress tests for 24 hrs."""
if self.GetBrowserInfo()['properties']['branding'] != 'Google Chrome':
logging.info('This is not a branded build, so stopping the stress')
return 1
self._DownloadSymbols()
run_number = 1
start_time = time.time()
while True:
logging.info('run %d...' % run_number)
run_number = run_number + 1
if (time.time() - start_time) >= 24*60*60:
        logging.info("It's been 24 hrs, so we break now.")
break
try:
methods = [self._NavigationStress, self._DownloadStress,
self._PluginStress, self._PrefStress]
random.shuffle(methods)
for method in methods:
method()
logging.info('Method %s done' % method)
except KeyboardInterrupt:
logging.info('----------We got a KeyboardInterrupt-----------')
except Exception, error:
logging.info('-------------There was an ERROR---------------')
logging.info(error)
# Crash Reporting
self._ReportCrashIfAny()
self._DeleteDumps()
if self.IsMac():
zombie = 'ps -el | grep Chrom | grep -v grep | grep Z | wc -l'
zombie_count = int(commands.getoutput(zombie))
if zombie_count > 0:
logging.info('WE HAVE ZOMBIES = %d' % zombie_count)
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause |
ghedsouza/django | django/utils/http.py | 9 | 14635 | import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_bytes
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
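    # For example (illustrative): urlencode({'a': [1, 2]}, doseq=True)
    # returns 'a=1&a=2'; non-string values are coerced with str() below.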
return original_urlencode(
[(k, [str(i) for i in v] if isinstance(v, (list, tuple)) else str(v))
for k, v in query],
doseq
)
def cookie_date(epoch_seconds=None):
"""
Format the time to ensure compatibility with Netscape's cookie standard.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
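    # The three formats look like this (RFC 1123, RFC 850, asctime):
    #   'Sun, 06 Nov 1994 08:49:37 GMT'
    #   'Sunday, 06-Nov-94 08:49:37 GMT'
    #   'Sun Nov  6 08:49:37 1994'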
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
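# For example, int_to_base36(1234) == 'ya' and base36_to_int('ya') == 1234.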
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
def urlsafe_base64_encode(s):
"""
Encode a bytestring in base64 for use in URLs. Strip any trailing equal
signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
def is_safe_url(url, host=None, allowed_hosts=None, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
if host:
warnings.warn(
"The host argument is deprecated, use allowed_hosts instead.",
RemovedInDjango21Warning,
stacklevel=2,
)
# Avoid mutating the passed in allowed_hosts.
allowed_hosts = allowed_hosts | {host}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
| bsd-3-clause |
indevgr/django | django/contrib/gis/db/backends/postgis/pgraster.py | 491 | 5071 | import binascii
import struct
from django.forms import ValidationError
from .const import (
GDAL_TO_POSTGIS, GDAL_TO_STRUCT, POSTGIS_HEADER_STRUCTURE, POSTGIS_TO_GDAL,
STRUCT_SIZE,
)
def pack(structure, data):
"""
Pack data into hex string with little endian format.
"""
return binascii.hexlify(struct.pack('<' + structure, *data)).upper()
def unpack(structure, data):
"""
Unpack little endian hexlified binary string into a list.
"""
return struct.unpack('<' + structure, binascii.unhexlify(data))
def chunk(data, index):
"""
Split a string into two parts at the input index.
"""
return data[:index], data[index:]
def get_pgraster_srid(data):
"""
Extract the SRID from a PostGIS raster string.
"""
if data is None:
return
# The positional arguments here extract the hex-encoded srid from the
# header of the PostGIS raster string. This can be understood through
# the POSTGIS_HEADER_STRUCTURE constant definition in the const module.
return unpack('i', data[106:114])[0]
def from_pgraster(data):
"""
Convert a PostGIS HEX String into a dictionary.
"""
if data is None:
return
# Split raster header from data
header, data = chunk(data, 122)
header = unpack(POSTGIS_HEADER_STRUCTURE, header)
# Parse band data
bands = []
pixeltypes = []
while data:
# Get pixel type for this band
pixeltype, data = chunk(data, 2)
pixeltype = unpack('B', pixeltype)[0]
# Subtract nodata byte from band nodata value if it exists
has_nodata = pixeltype >= 64
if has_nodata:
pixeltype -= 64
# Convert datatype from PostGIS to GDAL & get pack type and size
pixeltype = POSTGIS_TO_GDAL[pixeltype]
pack_type = GDAL_TO_STRUCT[pixeltype]
pack_size = 2 * STRUCT_SIZE[pack_type]
        # Parse band nodata value. The nodata value is part of the
        # PGRaster string even if the nodata flag is not set, so it always
        # has to be chunked off the data string.
nodata, data = chunk(data, pack_size)
nodata = unpack(pack_type, nodata)[0]
# Chunk and unpack band data (pack size times nr of pixels)
band, data = chunk(data, pack_size * header[10] * header[11])
band_result = {'data': binascii.unhexlify(band)}
# If the nodata flag is True, set the nodata value.
if has_nodata:
band_result['nodata_value'] = nodata
# Append band data to band list
bands.append(band_result)
# Store pixeltype of this band in pixeltypes array
pixeltypes.append(pixeltype)
# Check that all bands have the same pixeltype.
# This is required by GDAL. PostGIS rasters could have different pixeltypes
# for bands of the same raster.
if len(set(pixeltypes)) != 1:
raise ValidationError("Band pixeltypes are not all equal.")
return {
'srid': int(header[9]),
'width': header[10], 'height': header[11],
'datatype': pixeltypes[0],
'origin': (header[5], header[6]),
'scale': (header[3], header[4]),
'skew': (header[7], header[8]),
'bands': bands,
}
def to_pgraster(rast):
"""
Convert a GDALRaster into PostGIS Raster format.
"""
# Return if the raster is null
if rast is None or rast == '':
return
# Prepare the raster header data as a tuple. The first two numbers are
# the endianness and the PostGIS Raster Version, both are fixed by
# PostGIS at the moment.
rasterheader = (
1, 0, len(rast.bands), rast.scale.x, rast.scale.y,
rast.origin.x, rast.origin.y, rast.skew.x, rast.skew.y,
rast.srs.srid, rast.width, rast.height,
)
# Hexlify raster header
result = pack(POSTGIS_HEADER_STRUCTURE, rasterheader)
for band in rast.bands:
# The PostGIS raster band header has exactly two elements, a 8BUI byte
# and the nodata value.
#
# The 8BUI stores both the PostGIS pixel data type and a nodata flag.
# It is composed as the datatype integer plus 64 as a flag for existing
# nodata values:
# 8BUI_VALUE = PG_PIXEL_TYPE (0-11) + FLAG (0 or 64)
#
# For example, if the byte value is 71, then the datatype is
# 71-64 = 7 (32BSI) and the nodata value is True.
structure = 'B' + GDAL_TO_STRUCT[band.datatype()]
# Get band pixel type in PostGIS notation
pixeltype = GDAL_TO_POSTGIS[band.datatype()]
# Set the nodata flag
if band.nodata_value is not None:
pixeltype += 64
# Pack band header
bandheader = pack(structure, (pixeltype, band.nodata_value or 0))
# Hexlify band data
band_data_hex = binascii.hexlify(band.data(as_memoryview=True)).upper()
# Add packed header and band data to result
result += bandheader + band_data_hex
# Cast raster to string before passing it to the DB
return result.decode()
| bsd-3-clause |
alisw/alibuild | tests/test_init.py | 1 | 5571 | from __future__ import print_function
# Assuming you are using the mock library to ... mock things
try:
from unittest import mock
from unittest.mock import call, MagicMock # In Python 3, mock is built-in
from io import StringIO
except ImportError:
import mock
from mock import call, MagicMock # Python 2
from StringIO import StringIO
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import sys
git_mock = MagicMock(partialCloneFilter="--filter=blob:none")
sys.modules["alibuild_helpers.git"] = git_mock
from alibuild_helpers.init import doInit,parsePackagesDefinition
import unittest
from argparse import Namespace
import os.path as path
def can_do_git_clone(x):
return 0
def valid_recipe(x):
if "zlib" in x.url:
return (0, {"package": "zlib",
"source": "https://github.com/alisw/zlib",
"version": "v1.0"}, "")
elif "aliroot" in x.url:
return (0, {"package": "AliRoot",
"source": "https://github.com/alisw/AliRoot",
"version": "master"}, "")
def dummy_exists(x):
calls = { '/sw/MIRROR/aliroot': True }
if x in calls:
return calls[x]
return False
CLONE_EVERYTHING = [
call(u'git clone --origin upstream --filter=blob:none https://github.com/alisw/alidist -b master /alidist'),
call(u'git clone --origin upstream --filter=blob:none https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push upstream https://github.com/alisw/AliRoot')
]
class InitTestCase(unittest.TestCase):
def test_packageDefinition(self):
self.assertEqual(parsePackagesDefinition("AliRoot@v5-08-16,AliPhysics@v5-08-16-01"),
[{'ver': 'v5-08-16', 'name': 'AliRoot'},
{'ver': 'v5-08-16-01', 'name': 'AliPhysics'}])
self.assertEqual(parsePackagesDefinition("AliRoot,AliPhysics@v5-08-16-01"),
[{'ver': '', 'name': 'AliRoot'},
{'ver': 'v5-08-16-01', 'name': 'AliPhysics'}])
@mock.patch("alibuild_helpers.init.info")
@mock.patch("alibuild_helpers.init.path")
@mock.patch("alibuild_helpers.init.os")
def test_doDryRunInit(self, mock_os, mock_path, mock_info):
fake_dist = {"repo": "alisw/alidist", "ver": "master"}
args = Namespace(
develPrefix = ".",
configDir = "/alidist",
pkgname = "zlib,AliRoot@v5-08-00",
referenceSources = "/sw/MIRROR",
dist = fake_dist,
defaults = "release",
dryRun = True,
fetchRepos = False,
architecture = "slc7_x86-64"
)
self.assertRaises(SystemExit, doInit, args)
self.assertEqual(mock_info.mock_calls, [call('This will initialise local checkouts for %s\n--dry-run / -n specified. Doing nothing.', 'zlib,AliRoot')])
@mock.patch("alibuild_helpers.init.banner")
@mock.patch("alibuild_helpers.init.info")
@mock.patch("alibuild_helpers.init.path")
@mock.patch("alibuild_helpers.init.os")
@mock.patch("alibuild_helpers.init.execute")
@mock.patch("alibuild_helpers.init.parseRecipe")
@mock.patch("alibuild_helpers.init.updateReferenceRepoSpec")
@mock.patch("alibuild_helpers.utilities.open")
@mock.patch("alibuild_helpers.init.readDefaults")
def test_doRealInit(self, mock_read_defaults, mock_open, mock_update_reference, mock_parse_recipe, mock_execute, mock_os, mock_path, mock_info, mock_banner):
fake_dist = {"repo": "alisw/alidist", "ver": "master"}
mock_open.side_effect = lambda x: {
"/alidist/defaults-release.sh": StringIO("package: defaults-release\nversion: v1\n---"),
"/alidist/aliroot.sh": StringIO("package: AliRoot\nversion: master\nsource: https://github.com/alisw/AliRoot\n---")
}[x]
mock_execute.side_effect = can_do_git_clone
mock_parse_recipe.side_effect = valid_recipe
mock_path.exists.side_effect = dummy_exists
mock_os.mkdir.return_value = None
mock_path.join.side_effect = path.join
mock_read_defaults.return_value = (OrderedDict({"package": "defaults-release", "disable": []}), "")
args = Namespace(
develPrefix = ".",
configDir = "/alidist",
pkgname = "AliRoot@v5-08-00",
referenceSources = "/sw/MIRROR",
dist = fake_dist,
defaults = "release",
dryRun = False,
fetchRepos = False,
architecture = "slc7_x86-64"
)
doInit(args)
mock_execute.assert_called_with("git clone --origin upstream --filter=blob:none https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push upstream https://github.com/alisw/AliRoot")
self.assertEqual(mock_execute.mock_calls, CLONE_EVERYTHING)
mock_path.exists.assert_has_calls([call('.'), call('/sw/MIRROR'), call('/alidist'), call('./AliRoot')])
# Force fetch repos
mock_execute.reset_mock()
mock_path.reset_mock()
args.fetchRepos = True
doInit(args)
mock_execute.assert_called_with("git clone --origin upstream --filter=blob:none https://github.com/alisw/AliRoot -b v5-08-00 --reference /sw/MIRROR/aliroot ./AliRoot && cd ./AliRoot && git remote set-url --push upstream https://github.com/alisw/AliRoot")
self.assertEqual(mock_execute.mock_calls, CLONE_EVERYTHING)
mock_path.exists.assert_has_calls([call('.'), call('/sw/MIRROR'), call('/alidist'), call('./AliRoot')])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/SQLAlchemy-0.8.0b2-py2.7-linux-x86_64.egg/sqlalchemy/testing/schema.py | 8 | 2933 |
from . import exclusions
from .. import schema, event
from . import config
__all__ = 'Table', 'Column',
table_options = {}
def Table(*args, **kw):
"""A schema.Table wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in kw.keys()
if k.startswith('test_')])
kw.update(table_options)
if exclusions.against('mysql'):
if 'mysql_engine' not in kw and 'mysql_type' not in kw:
if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts:
kw['mysql_engine'] = 'InnoDB'
else:
kw['mysql_engine'] = 'MyISAM'
# Apply some default cascading rules for self-referential foreign keys.
    # MySQL InnoDB has some issues around selecting self-refs too.
if exclusions.against('firebird'):
table_name = args[0]
unpack = (config.db.dialect.
identifier_preparer.unformat_identifiers)
# Only going after ForeignKeys in Columns. May need to
# expand to ForeignKeyConstraint too.
fks = [fk
for col in args if isinstance(col, schema.Column)
for fk in col.foreign_keys]
for fk in fks:
# root around in raw spec
ref = fk._colspec
if isinstance(ref, schema.Column):
name = ref.table.name
else:
# take just the table name: on FB there cannot be
# a schema, so the first element is always the
# table name, possibly followed by the field name
name = unpack(ref)[0]
if name == table_name:
if fk.ondelete is None:
fk.ondelete = 'CASCADE'
if fk.onupdate is None:
fk.onupdate = 'CASCADE'
return schema.Table(*args, **kw)
def Column(*args, **kw):
"""A schema.Column wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in kw.keys()
if k.startswith('test_')])
if not config.requirements.foreign_key_ddl.enabled:
args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
col = schema.Column(*args, **kw)
if 'test_needs_autoincrement' in test_opts and \
kw.get('primary_key', False) and \
exclusions.against('firebird', 'oracle'):
def add_seq(c, tbl):
c._init_items(
schema.Sequence(_truncate_name(
config.db.dialect, tbl.name + '_' + c.name + '_seq'),
optional=True)
)
event.listen(col, 'after_parent_attach', add_seq, propagate=True)
return col
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return name[0:max(dialect.max_identifier_length - 6, 0)] + \
"_" + hex(hash(name) % 64)[2:]
else:
return name
| gpl-2.0 |
onoga/toolib | toolib/wx/util/ControlHost.py | 2 | 1928 | # -*- coding: Cp1251 -*-
###############################################################################
#
'''
'''
__author__ = "Oleg Noga"
__date__ = "$Date: 2005/12/07 19:53:53 $"
__version__ = "$Revision: 1.2 $"
# $Source: D:/HOME/cvs/toolib/wx/util/ControlHost.py,v $
###############################################################################
import wx
from toolib.util.OrderDict import OrderDict
class ControlHost(object):
"""
	Registers labels, controls, and validators;
	validates, produces error messages,
	and gathers data.
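	A registered validator only needs to expose validate(value, label) and
	raise ValueError on invalid input. A minimal sketch (hypothetical
	NotEmpty class, not part of this module):
		class NotEmpty:
			def validate(self, value, label):
				if not value:
					raise ValueError('%s is required' % label)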
"""
_POS_CONTROL = 0
_POS_VALIDATOR = 1
_POS_LABEL = 2
def __init__(self):
self.__controls = OrderDict()
def getControlIds(self):
return self.__controls.keys()
def registerControl(self, id, control, validator=None, label=None):
self.__controls[id] = (control, validator, label)
def validate(self):
errors = []
for id, (control, validator, label) in self.__controls.iteritems():
if validator is not None and control.IsEnabled():
value = self._getControlValue(id)
try:
validator.validate(value, label)
except ValueError, e:
errors.append(e[0])
return errors
def getControl(self, id):
return self.__controls[id][self._POS_CONTROL]
def _getControlValue(self, id):
c = self.getControl(id)
if hasattr(c, 'getDate'):
return c.getDate()
else:
return c.GetValue()
def getValidator(self, id):
return self.__controls[id][self._POS_VALIDATOR]
def setValidator(self, id, validator):
control, oldValidator, label = self.__controls[id]
self.registerControl(id, control, validator, label)
def getLabel(self, id):
return self.__controls[id][self._POS_LABEL]
def setLabel(self, id, label):
control, validator, oldLabel = self.__controls[id]
self.registerControl(id, control, validator, label)
def getData(self):
d = {}
for id in self.__controls.iterkeys():
d[id] = self._getControlValue(id)
return d
| gpl-2.0 |
sqlobject/sqlobject | sqlobject/boundattributes.py | 2 | 4191 | """
Bound attributes are attributes that are bound to a specific class and
a specific name. In SQLObject a typical example is a column object,
which knows its name and class.
A bound attribute should define a method ``__addtoclass__(added_class,
name)`` (attributes without this method will simply be treated as
normal). The return value is ignored; if the attribute wishes to
change the value in the class, it must call ``setattr(added_class,
name, new_value)``.
BoundAttribute is a class that facilitates lazy attribute creation.
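A minimal illustrative sketch (the ``DebugMarker`` name and behaviour are
hypothetical, not part of this module), assuming the owning class is built
through SQLObject's declarative class-creation machinery:
    class DebugMarker(BoundAttribute):
        @classmethod
        def make_object(cls, added_class, attr_name, **attrs):
            return '%s.%s' % (added_class.__name__, attr_name)
Assigning ``DebugMarker()`` to a class attribute causes ``__addtoclass__``
to run, which calls ``make_object`` and rebinds the attribute to whatever
it returns.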
"""
from __future__ import absolute_import
from . import declarative
from . import events
__all__ = ['BoundAttribute', 'BoundFactory']
class BoundAttribute(declarative.Declarative):
"""
This is a declarative class that passes all the values given to it
to another object. So you can pass it arguments (via
__init__/__call__) or give it the equivalent of keyword arguments
through subclassing. Then a bound object will be added in its
place.
To hook this other object in, override ``make_object(added_class,
name, **attrs)`` and maybe ``set_object(added_class, name,
**attrs)`` (the default implementation of ``set_object``
just resets the attribute to whatever ``make_object`` returned).
Also see ``BoundFactory``.
"""
_private_variables = (
'_private_variables',
'_all_attributes',
'__classinit__',
'__addtoclass__',
'_add_attrs',
'set_object',
'make_object',
'clone_in_subclass',
)
_all_attrs = ()
clone_for_subclass = True
def __classinit__(cls, new_attrs):
declarative.Declarative.__classinit__(cls, new_attrs)
cls._all_attrs = cls._add_attrs(cls, new_attrs)
def __instanceinit__(self, new_attrs):
declarative.Declarative.__instanceinit__(self, new_attrs)
self.__dict__['_all_attrs'] = self._add_attrs(self, new_attrs)
@staticmethod
def _add_attrs(this_object, new_attrs):
private = this_object._private_variables
all_attrs = list(this_object._all_attrs)
for key in new_attrs.keys():
if key.startswith('_') or key in private:
continue
if key not in all_attrs:
all_attrs.append(key)
return tuple(all_attrs)
@declarative.classinstancemethod
def __addtoclass__(self, cls, added_class, attr_name):
me = self or cls
attrs = {}
for name in me._all_attrs:
attrs[name] = getattr(me, name)
attrs['added_class'] = added_class
attrs['attr_name'] = attr_name
obj = me.make_object(**attrs)
if self.clone_for_subclass:
def on_rebind(new_class_name, bases, new_attrs,
post_funcs, early_funcs):
def rebind(new_class):
me.set_object(
new_class, attr_name,
me.make_object(**attrs))
post_funcs.append(rebind)
events.listen(receiver=on_rebind, soClass=added_class,
signal=events.ClassCreateSignal, weak=False)
me.set_object(added_class, attr_name, obj)
@classmethod
def set_object(cls, added_class, attr_name, obj):
setattr(added_class, attr_name, obj)
@classmethod
def make_object(cls, added_class, attr_name, *args, **attrs):
raise NotImplementedError
def __setattr__(self, name, value):
self.__dict__['_all_attrs'] = self._add_attrs(self, {name: value})
self.__dict__[name] = value
class BoundFactory(BoundAttribute):
"""
This will bind the attribute to whatever is given by
``factory_class``. This factory should be a callable with the
signature ``factory_class(added_class, attr_name, *args, **kw)``.
The factory will be reinvoked (and the attribute rebound) for
every subclassing.
"""
factory_class = None
_private_variables = (
BoundAttribute._private_variables + ('factory_class',))
def make_object(cls, added_class, attr_name, *args, **kw):
return cls.factory_class(added_class, attr_name, *args, **kw)
| lgpl-2.1 |
Tiger66639/ansible-modules-core | windows/win_get_url.py | 13 | 1921 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Paul Durivage <[email protected]>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_get_url
version_added: "1.7"
short_description: Fetches a file from a given URL
description:
- Fetches a file from a URL and saves to locally
options:
url:
description:
- The full URL of a file to download
required: true
default: null
aliases: []
dest:
description:
- The absolute path of the location to save the file at the URL. Be sure to include a filename and extension as appropriate.
required: false
default: yes
aliases: []
author: "Paul Durivage (@angstwad)"
'''
EXAMPLES = '''
# Downloading a JPEG and saving it to a file with the ansible command.
# Note the "dest" is quoted rather instead of escaping the backslashes
$ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthrise.jpg dest='C:\Users\Administrator\earthrise.jpg'" all
# Playbook example
- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg'
win_get_url:
url: 'http://www.example.com/earthrise.jpg'
dest: 'C:\Users\RandomUser\earthrise.jpg'
'''
| gpl-3.0 |
bema-ligo/pycbc | pycbc/types/array_cpu.py | 1 | 5898 | # Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Numpy based CPU backend for PyCBC Array
"""
from __future__ import absolute_import
import numpy as _np
from pycbc.types.array import common_kind, complex128, float64
from . import aligned as _algn
from scipy.linalg import blas
from weave import inline
from pycbc.opt import omp_libs, omp_flags
from pycbc import WEAVE_FLAGS
from pycbc.types import real_same_precision_as
def zeros(length, dtype=_np.float64):
return _algn.zeros(length, dtype=dtype)
def empty(length, dtype=_np.float64):
return _algn.empty(length, dtype=dtype)
def ptr(self):
return self.data.ctypes.data
def dot(self, other):
return _np.dot(self._data,other)
def min(self):
return self.data.min()
code_abs_arg_max = """
float val = 0;
int l = 0;
for (int i=0; i<N; i++){
float mag = data[i*2] * data[i*2] + data[i*2+1] * data[i*2+1];
if ( mag > val){
l = i;
val = mag;
}
}
loc[0] = l;
"""
code_flags = [WEAVE_FLAGS] + omp_flags
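# code_abs_arg_max above scans interleaved (real, imag) pairs and records the
# index of the largest squared magnitude; weave.inline compiles and runs it
# inside abs_arg_max() below.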
def abs_arg_max(self):
if self.kind == 'real':
return _np.argmax(self.data)
else:
data = _np.array(self._data,
copy=False).view(real_same_precision_as(self))
loc = _np.array([0])
N = len(self)
inline(code_abs_arg_max, ['data', 'loc', 'N'], libraries=omp_libs,
extra_compile_args=code_flags)
return loc[0]
def abs_max_loc(self):
if self.kind == 'real':
tmp = abs(self.data)
ind = _np.argmax(tmp)
return tmp[ind], ind
else:
tmp = self.data.real ** 2.0
tmp += self.data.imag ** 2.0
ind = _np.argmax(tmp)
return tmp[ind] ** 0.5, ind
def cumsum(self):
return self.data.cumsum()
def max(self):
return self.data.max()
def max_loc(self):
ind = _np.argmax(self.data)
return self.data[ind], ind
def take(self, indices):
return self.data.take(indices)
def weighted_inner(self, other, weight):
""" Return the inner product of the array with complex conjugation.
"""
if weight is None:
return self.inner(other)
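    # Otherwise compute sum(conj(self[i]) * other[i] / weight[i]),
    # accumulated in double precision.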
cdtype = common_kind(self.dtype, other.dtype)
if cdtype.kind == 'c':
acum_dtype = complex128
else:
acum_dtype = float64
return _np.sum(self.data.conj() * other / weight, dtype=acum_dtype)
inner_code = """
double value = 0;
#pragma omp parallel for reduction(+:value)
for (int i=0; i<N; i++){
float val = x[i] * y[i];
value += val;
}
total[0] = value;
"""
def inner_inline_real(self, other):
x = _np.array(self._data, copy=False)
y = _np.array(other, copy=False)
total = _np.array([0.], dtype=float64)
N = len(self)
inline(inner_code, ['x', 'y', 'total', 'N'], libraries=omp_libs,
extra_compile_args=code_flags)
return total[0]
def inner(self, other):
""" Return the inner product of the array with complex conjugation.
"""
cdtype = common_kind(self.dtype, other.dtype)
if cdtype.kind == 'c':
return _np.sum(self.data.conj() * other, dtype=complex128)
else:
return inner_inline_real(self, other)
def vdot(self, other):
""" Return the inner product of the array with complex conjugation.
"""
return _np.vdot(self.data, other)
def squared_norm(self):
""" Return the elementwise squared norm of the array """
return (self.data.real**2 + self.data.imag**2)
_blas_mandadd_funcs = {}
_blas_mandadd_funcs[_np.float32] = blas.saxpy
_blas_mandadd_funcs[_np.float64] = blas.daxpy
_blas_mandadd_funcs[_np.complex64] = blas.caxpy
_blas_mandadd_funcs[_np.complex128] = blas.zaxpy
def multiply_and_add(self, other, mult_fac):
"""
Return other multiplied by mult_fac and with self added.
Self will be modified in place. This requires all inputs to be of the same
precision.
"""
    # Sanity checking should have already been done, but we don't know
    # whether mult_fac is an array or a scalar.
inpt = _np.array(self.data, copy=False)
N = len(inpt)
# For some reason, _checkother decorator returns other.data so we don't
# take .data here
other = _np.array(other, copy=False)
assert(inpt.dtype == other.dtype)
blas_fnc = _blas_mandadd_funcs[inpt.dtype.type]
return blas_fnc(other, inpt, a=mult_fac)
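# Illustrative only: without BLAS the same in-place update could be written
# as below (hypothetical name). The axpy call above avoids allocating the
# temporary array that `other * mult_fac` creates.
def _multiply_and_add_numpy_sketch(self, other, mult_fac):
    self._data += _np.array(other, copy=False) * mult_fac
    return self._data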
def numpy(self):
return self._data
def _copy(self, self_ref, other_ref):
self_ref[:] = other_ref[:]
def _getvalue(self, index):
return self._data[index]
def sum(self):
if self.kind == 'real':
return _np.sum(self._data,dtype=float64)
else:
return _np.sum(self._data,dtype=complex128)
def clear(self):
self[:] = 0
def _scheme_matches_base_array(array):
# Since ArrayWithAligned is a subclass of ndarray,
# and since converting to ArrayWithAligned will
# *not* copy 'array', the following is the way to go:
if isinstance(array, _np.ndarray):
return True
else:
return False
| gpl-3.0 |
magenta/magenta | magenta/models/nsynth/baseline/models/ae.py | 1 | 7401 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Autoencoder model for training on spectrograms."""
from magenta.contrib import training as contrib_training
from magenta.models.nsynth import utils
import numpy as np
import tensorflow.compat.v1 as tf
import tf_slim as slim
def get_hparams(config_name):
"""Set hyperparameters.
Args:
config_name: Name of config module to use.
Returns:
    An HParams object (magenta) with defaults.
"""
hparams = contrib_training.HParams(
# Optimization
batch_size=16,
learning_rate=1e-4,
adam_beta=0.5,
max_steps=6000 * 50000,
samples_per_second=16000,
num_samples=64000,
# Preprocessing
n_fft=1024,
hop_length=256,
mask=True,
log_mag=True,
use_cqt=False,
re_im=False,
dphase=True,
mag_only=False,
pad=True,
mu_law_num=0,
raw_audio=False,
# Graph
num_latent=64, # dimension of z.
cost_phase_mask=False,
phase_loss_coeff=1.0,
fw_loss_coeff=1.0, # Frequency weighted cost
fw_loss_cutoff=1000,
)
# Set values from a dictionary in the config
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hasattr(config, "config_hparams"):
config_hparams = config.config_hparams
hparams.update(config_hparams)
return hparams
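# Illustrative only: a config module under baseline.models.ae_configs can
# override the defaults above by defining a `config_hparams` dict, which
# get_hparams() merges via hparams.update(). The names and values below are
# hypothetical and not taken from any shipped config.
_example_config_hparams = {
    "batch_size": 8,     # e.g. smaller batches for limited memory
    "num_latent": 128,   # e.g. a wider bottleneck than the default 64
}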
def compute_mse_loss(x, xhat, hparams):
"""MSE loss function.
Args:
x: Input data tensor.
xhat: Reconstruction tensor.
hparams: Hyperparameters.
Returns:
total_loss: MSE loss scalar.
"""
with tf.name_scope("Losses"):
if hparams.raw_audio:
total_loss = tf.reduce_mean((x - xhat)**2)
else:
# Magnitude
m = x[:, :, :, 0] if hparams.cost_phase_mask else 1.0
fm = utils.frequency_weighted_cost_mask(
hparams.fw_loss_coeff,
hz_flat=hparams.fw_loss_cutoff,
n_fft=hparams.n_fft)
mag_loss = tf.reduce_mean(fm * (x[:, :, :, 0] - xhat[:, :, :, 0])**2)
if hparams.mag_only:
total_loss = mag_loss
else:
# Phase
if hparams.dphase:
phase_loss = tf.reduce_mean(fm * m *
(x[:, :, :, 1] - xhat[:, :, :, 1])**2)
else:
          # Von Mises distribution ("circular normal").
          # A constant is added to keep the loss positive; the penalty ranges
          # over [0, 2].
phase_loss = 1 - tf.reduce_mean(fm * m * tf.cos(
(x[:, :, :, 1] - xhat[:, :, :, 1]) * np.pi))
total_loss = mag_loss + hparams.phase_loss_coeff * phase_loss
tf.summary.scalar("Loss/Mag", mag_loss)
tf.summary.scalar("Loss/Phase", phase_loss)
tf.summary.scalar("Loss/Total", total_loss)
return total_loss
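# Illustrative only: a small numpy sketch of the circular phase penalty used
# above, assuming phases are stored normalized to [-1, 1] (as the dphase
# representation suggests). It is 0 for identical phases and reaches 2 when
# they differ by half a cycle.
def _example_phase_penalty(phase_true, phase_pred):
  return 1.0 - np.cos((phase_true - phase_pred) * np.pi)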
def train_op(batch, hparams, config_name):
"""Define a training op, including summaries and optimization.
Args:
batch: Dictionary produced by NSynthDataset.
hparams: Hyperparameters dictionary.
config_name: Name of config module.
Returns:
train_op: A complete iteration of training with summaries.
"""
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hparams.raw_audio:
x = batch["audio"]
# Add height and channel dims
x = tf.expand_dims(tf.expand_dims(x, 1), -1)
else:
x = batch["spectrogram"]
# Define the model
with tf.name_scope("Model"):
z = config.encode(x, hparams)
xhat = config.decode(z, batch, hparams)
# For interpolation
tf.add_to_collection("x", x)
tf.add_to_collection("pitch", batch["pitch"])
tf.add_to_collection("z", z)
tf.add_to_collection("xhat", xhat)
# Compute losses
total_loss = compute_mse_loss(x, xhat, hparams)
# Apply optimizer
with tf.name_scope("Optimizer"):
global_step = tf.get_variable(
"global_step", [],
tf.int64,
initializer=tf.constant_initializer(0),
trainable=False)
optimizer = tf.train.AdamOptimizer(hparams.learning_rate, hparams.adam_beta)
train_step = slim.learning.create_train_op(total_loss,
optimizer,
global_step=global_step)
return train_step
def eval_op(batch, hparams, config_name):
"""Define a evaluation op.
Args:
batch: Batch produced by NSynthReader.
hparams: Hyperparameters.
config_name: Name of config module.
Returns:
eval_op: A complete evaluation op with summaries.
"""
phase = not (hparams.mag_only or hparams.raw_audio)
config = utils.get_module("baseline.models.ae_configs.%s" % config_name)
if hparams.raw_audio:
x = batch["audio"]
# Add height and channel dims
x = tf.expand_dims(tf.expand_dims(x, 1), -1)
else:
x = batch["spectrogram"]
# Define the model
with tf.name_scope("Model"):
z = config.encode(x, hparams, is_training=False)
xhat = config.decode(z, batch, hparams, is_training=False)
# For interpolation
tf.add_to_collection("x", x)
tf.add_to_collection("pitch", batch["pitch"])
tf.add_to_collection("z", z)
tf.add_to_collection("xhat", xhat)
total_loss = compute_mse_loss(x, xhat, hparams)
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
"Loss": slim.metrics.mean(total_loss),
})
# Define the summaries
for name, value in names_to_values.items():
slim.summaries.add_scalar_summary(value, name, print_summary=True)
# Interpolate
with tf.name_scope("Interpolation"):
xhat = config.decode(z, batch, hparams, reuse=True, is_training=False)
# Linear interpolation
z_shift_one_example = tf.concat([z[1:], z[:1]], 0)
z_linear_half = (z + z_shift_one_example) / 2.0
xhat_linear_half = config.decode(z_linear_half, batch, hparams, reuse=True,
is_training=False)
# Pitch shift
pitch_plus_2 = tf.clip_by_value(batch["pitch"] + 2, 0, 127)
pitch_minus_2 = tf.clip_by_value(batch["pitch"] - 2, 0, 127)
batch["pitch"] = pitch_minus_2
xhat_pitch_minus_2 = config.decode(z, batch, hparams,
reuse=True, is_training=False)
batch["pitch"] = pitch_plus_2
xhat_pitch_plus_2 = config.decode(z, batch, hparams,
reuse=True, is_training=False)
utils.specgram_summaries(x, "Training Examples", hparams, phase=phase)
utils.specgram_summaries(xhat, "Reconstructions", hparams, phase=phase)
utils.specgram_summaries(
x - xhat, "Difference", hparams, audio=False, phase=phase)
utils.specgram_summaries(
xhat_linear_half, "Linear Interp. 0.5", hparams, phase=phase)
utils.specgram_summaries(xhat_pitch_plus_2, "Pitch +2", hparams, phase=phase)
utils.specgram_summaries(xhat_pitch_minus_2, "Pitch -2", hparams, phase=phase)
return list(names_to_updates.values())
| apache-2.0 |
jamesls/boto | boto/elastictranscoder/layer1.py | 4 | 34890 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.elastictranscoder import exceptions
class ElasticTranscoderConnection(AWSAuthConnection):
"""
AWS Elastic Transcoder Service
The AWS Elastic Transcoder Service.
"""
APIVersion = "2012-09-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "elastictranscoder.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"IncompatibleVersionException": exceptions.IncompatibleVersionException,
"LimitExceededException": exceptions.LimitExceededException,
"ResourceInUseException": exceptions.ResourceInUseException,
"AccessDeniedException": exceptions.AccessDeniedException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"InternalServiceException": exceptions.InternalServiceException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.get('region')
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
else:
del kwargs['region']
kwargs['host'] = region.endpoint
AWSAuthConnection.__init__(self, **kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def cancel_job(self, id=None):
"""
To cancel a job, send a DELETE request to the
`/2012-09-25/jobs/ [jobId] ` resource.
You can only cancel a job that has a status of `Submitted`. To
prevent a pipeline from starting to process a job while you're
getting the job identifier, use UpdatePipelineStatus to
temporarily pause the pipeline.
:type id: string
:param id: The identifier of the job that you want to delete.
To get a list of the jobs (including their `jobId`) that have a status
of `Submitted`, use the ListJobsByStatus API action.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def create_job(self, pipeline_id=None, input_name=None, output=None,
outputs=None, output_key_prefix=None, playlists=None):
"""
To create a job, send a POST request to the `/2012-09-25/jobs`
resource.
When you create a job, Elastic Transcoder returns JSON data
that includes the values that you specified plus information
about the job that is created.
If you have specified more than one output for your jobs (for
example, one output for the Kindle Fire and another output for
the Apple iPhone 4s), you currently must use the Elastic
Transcoder API to list the jobs (as opposed to the AWS
Console).
:type pipeline_id: string
:param pipeline_id: The `Id` of the pipeline that you want Elastic
Transcoder to use for transcoding. The pipeline determines several
settings, including the Amazon S3 bucket from which Elastic
Transcoder gets the files to transcode and the bucket into which
Elastic Transcoder puts the transcoded files.
:type input_name: dict
:param input_name: A section of the request body that provides
information about the file that is being transcoded.
:type output: dict
:param output:
:type outputs: list
:param outputs: A section of the request body that provides information
about the transcoded (target) files. We recommend that you use the
`Outputs` syntax instead of the `Output` syntax.
:type output_key_prefix: string
:param output_key_prefix: The value, if any, that you want Elastic
Transcoder to prepend to the names of all files that this job
creates, including output files, thumbnails, and playlists.
:type playlists: list
:param playlists: If you specify a preset in `PresetId` for which the
value of `Container` is ts (MPEG-TS), Playlists contains
information about the master playlists that you want Elastic
Transcoder to create.
We recommend that you create only one master playlist. The maximum
number of master playlists in a job is 30.
"""
uri = '/2012-09-25/jobs'
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if input_name is not None:
params['Input'] = input_name
if output is not None:
params['Output'] = output
if outputs is not None:
params['Outputs'] = outputs
if output_key_prefix is not None:
params['OutputKeyPrefix'] = output_key_prefix
if playlists is not None:
params['Playlists'] = playlists
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def create_pipeline(self, name=None, input_bucket=None,
output_bucket=None, role=None, notifications=None,
content_config=None, thumbnail_config=None):
"""
To create a pipeline, send a POST request to the
`2012-09-25/pipelines` resource.
:type name: string
:param name: The name of the pipeline. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
Constraints: Maximum 40 characters.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket in which you saved the media
files that you want to transcode.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket in which you want Elastic
Transcoder to save the transcoded files. (Use this, or use
ContentConfig:Bucket plus ThumbnailConfig:Bucket.)
Specify this value when all of the following are true:
+ You want to save transcoded files, thumbnails (if any), and playlists
(if any) together in one bucket.
+ You do not want to specify the users or groups who have access to the
transcoded files, thumbnails, and playlists.
+ You do not want to specify the permissions that Elastic Transcoder
grants to the files. When Elastic Transcoder saves files in
`OutputBucket`, it grants full control over the files only to the
AWS account that owns the role that is specified by `Role`.
+ You want to associate the transcoded files and thumbnails with the
Amazon S3 Standard storage class.
If you want to save transcoded files and playlists in one bucket and
thumbnails in another bucket, specify which users can access the
transcoded files or the permissions the users have, or change the
Amazon S3 storage class, omit `OutputBucket` and specify values for
`ContentConfig` and `ThumbnailConfig` instead.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to use to create the pipeline.
:type notifications: dict
:param notifications:
The Amazon Simple Notification Service (Amazon SNS) topic that you want
to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process a job in this pipeline. This is
the ARN that Amazon SNS returned when you created the topic. For
more information, see Create a Topic in the Amazon Simple
Notification Service Developer Guide.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job in
this pipeline. This is the ARN that Amazon SNS returned when you
created the topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition while
processing a job in this pipeline. This is the ARN that Amazon SNS
returned when you created the topic.
:type content_config: dict
:param content_config:
The optional `ContentConfig` object specifies information about the
Amazon S3 bucket in which you want Elastic Transcoder to save
transcoded files and playlists: which bucket to use, which users
you want to have access to the files, the type of access you want
users to have, and the storage class that you want to assign to the
files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig`.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save transcoded files and playlists.
+ **Permissions** (Optional): The Permissions object specifies which
users you want to have access to transcoded files and the type of
access you want them to have. You can grant permissions to a
maximum of 30 users and/or predefined Amazon S3 groups.
+ **Grantee Type**: Specify the type of value that appears in the
`Grantee` object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. For more information about
canonical user IDs, see Access Control List (ACL) Overview in the
Amazon Simple Storage Service Developer Guide. For more information
about using CloudFront origin access identities to require that
users use CloudFront URLs instead of Amazon S3 URLs, see Using an
Origin Access Identity to Restrict Access to Your Amazon S3
Content. A canonical user ID is not the same as an AWS account
number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
transcoded files and playlists. To identify the user or group, you
can specify the canonical user ID for an AWS account, an origin
access identity for a CloudFront distribution, the registered email
address of an AWS account, or a predefined Amazon S3 group
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the files
that Elastic Transcoder adds to the bucket, including playlists and
video files. Valid values include:
+ `READ`: The grantee can read the objects and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the objects that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the objects that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the video files and playlists that it stores in your Amazon S3
bucket.
:type thumbnail_config: dict
:param thumbnail_config:
The `ThumbnailConfig` object specifies several values, including the
Amazon S3 bucket in which you want Elastic Transcoder to save
thumbnail files, which users you want to have access to the files,
the type of access you want users to have, and the storage class
that you want to assign to the files.
If you specify values for `ContentConfig`, you must also specify values
for `ThumbnailConfig` even if you don't want to create thumbnails.
If you specify values for `ContentConfig` and `ThumbnailConfig`, omit
the `OutputBucket` object.
+ **Bucket**: The Amazon S3 bucket in which you want Elastic Transcoder
to save thumbnail files.
+ **Permissions** (Optional): The `Permissions` object specifies which
users and/or predefined Amazon S3 groups you want to have access to
thumbnail files, and the type of access you want them to have. You
can grant permissions to a maximum of 30 users and/or predefined
Amazon S3 groups.
+ **GranteeType**: Specify the type of value that appears in the
Grantee object:
+ **Canonical**: The value in the `Grantee` object is either the
canonical user ID for an AWS account or an origin access identity
for an Amazon CloudFront distribution. A canonical user ID is not
the same as an AWS account number.
+ **Email**: The value in the `Grantee` object is the registered email
address of an AWS account.
+ **Group**: The value in the `Grantee` object is one of the following
predefined Amazon S3 groups: `AllUsers`, `AuthenticatedUsers`, or
`LogDelivery`.
+ **Grantee**: The AWS user or group that you want to have access to
thumbnail files. To identify the user or group, you can specify the
canonical user ID for an AWS account, an origin access identity for
a CloudFront distribution, the registered email address of an AWS
account, or a predefined Amazon S3 group.
+ **Access**: The permission that you want to give to the AWS user that
you specified in `Grantee`. Permissions are granted on the
thumbnail files that Elastic Transcoder adds to the bucket. Valid
values include:
+ `READ`: The grantee can read the thumbnails and metadata for objects
that Elastic Transcoder adds to the Amazon S3 bucket.
+ `READ_ACP`: The grantee can read the object ACL for thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `WRITE_ACP`: The grantee can write the ACL for the thumbnails that
Elastic Transcoder adds to the Amazon S3 bucket.
+ `FULL_CONTROL`: The grantee has `READ`, `READ_ACP`, and `WRITE_ACP`
permissions for the thumbnails that Elastic Transcoder adds to the
Amazon S3 bucket.
+ **StorageClass**: The Amazon S3 storage class, `Standard` or
`ReducedRedundancy`, that you want Elastic Transcoder to assign to
the thumbnails that it stores in your Amazon S3 bucket.
"""
uri = '/2012-09-25/pipelines'
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def create_preset(self, name=None, description=None, container=None,
video=None, audio=None, thumbnails=None):
"""
To create a preset, send a POST request to the
`/2012-09-25/presets` resource.
Elastic Transcoder checks the settings that you specify to
ensure that they meet Elastic Transcoder requirements and to
determine whether they comply with H.264 standards. If your
settings are not valid for Elastic Transcoder, Elastic
Transcoder returns an HTTP 400 response (
`ValidationException`) and does not create the preset. If the
settings are valid for Elastic Transcoder but aren't strictly
compliant with the H.264 standard, Elastic Transcoder creates
the preset and returns a warning message in the response. This
helps you determine whether your settings comply with the
H.264 standard while giving you greater flexibility with
respect to the video that Elastic Transcoder produces.
Elastic Transcoder uses the H.264 video-compression format.
For more information, see the International Telecommunication
Union publication Recommendation ITU-T H.264: Advanced video
coding for generic audiovisual services .
:type name: string
:param name: The name of the preset. We recommend that the name be
unique within the AWS account, but uniqueness is not enforced.
:type description: string
:param description: A description of the preset.
:type container: string
:param container: The container type for the output file. This value
must be `mp4`.
:type video: dict
:param video: A section of the request body that specifies the video
parameters.
:type audio: dict
:param audio: A section of the request body that specifies the audio
parameters.
:type thumbnails: dict
:param thumbnails: A section of the request body that specifies the
thumbnail parameters, if any.
"""
uri = '/2012-09-25/presets'
params = {}
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if container is not None:
params['Container'] = container
if video is not None:
params['Video'] = video
if audio is not None:
params['Audio'] = audio
if thumbnails is not None:
params['Thumbnails'] = thumbnails
return self.make_request('POST', uri, expected_status=201,
data=json.dumps(params))
def delete_pipeline(self, id=None):
"""
To delete a pipeline, send a DELETE request to the
`/2012-09-25/pipelines/ [pipelineId] ` resource.
You can only delete a pipeline that has never been used or
that is not currently in use (doesn't contain any active
jobs). If the pipeline is currently in use, `DeletePipeline`
returns an error.
:type id: string
:param id: The identifier of the pipeline that you want to delete.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def delete_preset(self, id=None):
"""
To delete a preset, send a DELETE request to the
`/2012-09-25/presets/ [presetId] ` resource.
If the preset has been used, you cannot delete it.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('DELETE', uri, expected_status=202)
def list_jobs_by_pipeline(self, pipeline_id=None, ascending=None,
page_token=None):
"""
To get a list of the jobs currently in a pipeline, send a GET
request to the `/2012-09-25/jobsByPipeline/ [pipelineId] `
resource.
Elastic Transcoder returns all of the jobs currently in the
specified pipeline. The response body contains one element for
each job that satisfies the search criteria.
:type pipeline_id: string
:param pipeline_id: The ID of the pipeline for which you want to get
job information.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByPipeline/{0}'.format(pipeline_id)
params = {}
if pipeline_id is not None:
params['PipelineId'] = pipeline_id
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_jobs_by_status(self, status=None, ascending=None,
page_token=None):
"""
To get a list of the jobs that have a specified status, send a
GET request to the `/2012-09-25/jobsByStatus/ [status] `
resource.
Elastic Transcoder returns all of the jobs that have the
specified status. The response body contains one element for
each job that satisfies the search criteria.
:type status: string
:param status: To get information about all of the jobs associated with
the current AWS account that have a given status, specify the
following status: `Submitted`, `Progressing`, `Complete`,
`Canceled`, or `Error`.
:type ascending: string
:param ascending: To list jobs in chronological order by the date and
time that they were submitted, enter `True`. To list jobs in
reverse chronological order, enter `False`.
:type page_token: string
:param page_token: When Elastic Transcoder returns more than one page
of results, use `pageToken` in subsequent `GET` requests to get
each successive page of results.
"""
uri = '/2012-09-25/jobsByStatus/{0}'.format(status)
params = {}
if status is not None:
params['Status'] = status
if ascending is not None:
params['Ascending'] = ascending
if page_token is not None:
params['PageToken'] = page_token
return self.make_request('GET', uri, expected_status=200,
params=params)
def list_pipelines(self):
"""
To get a list of the pipelines associated with the current AWS
account, send a GET request to the `/2012-09-25/pipelines`
resource.
"""
uri = '/2012-09-25/pipelines'
return self.make_request('GET', uri, expected_status=200)
def list_presets(self):
"""
To get a list of all presets associated with the current AWS
account, send a GET request to the `/2012-09-25/presets`
resource.
"""
uri = '/2012-09-25/presets'
return self.make_request('GET', uri, expected_status=200)
def read_job(self, id=None):
"""
To get detailed information about a job, send a GET request to
the `/2012-09-25/jobs/ [jobId] ` resource.
:type id: string
:param id: The identifier of the job for which you want to get detailed
information.
"""
uri = '/2012-09-25/jobs/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_pipeline(self, id=None):
"""
To get detailed information about a pipeline, send a GET
request to the `/2012-09-25/pipelines/ [pipelineId] `
resource.
:type id: string
:param id: The identifier of the pipeline to read.
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def read_preset(self, id=None):
"""
To get detailed information about a preset, send a GET request
to the `/2012-09-25/presets/ [presetId] ` resource.
:type id: string
:param id: The identifier of the preset for which you want to get
detailed information.
"""
uri = '/2012-09-25/presets/{0}'.format(id)
return self.make_request('GET', uri, expected_status=200)
def test_role(self, role=None, input_bucket=None, output_bucket=None,
topics=None):
"""
To test the IAM role that's used by Elastic Transcoder to
create the pipeline, send a POST request to the
`/2012-09-25/roleTests` resource.
The `TestRole` action lets you determine whether the IAM role
you are using has sufficient permissions to let Elastic
Transcoder perform tasks associated with the transcoding
process. The action attempts to assume the specified IAM role,
checks read access to the input and output buckets, and tries
to send a test notification to Amazon SNS topics that you
specify.
:type role: string
:param role: The IAM Amazon Resource Name (ARN) for the role that you
want Elastic Transcoder to test.
:type input_bucket: string
:param input_bucket: The Amazon S3 bucket that contains media files to
be transcoded. The action attempts to read from this bucket.
:type output_bucket: string
:param output_bucket: The Amazon S3 bucket that Elastic Transcoder will
write transcoded media files to. The action attempts to read from
this bucket.
:type topics: list
:param topics: The ARNs of one or more Amazon Simple Notification
Service (Amazon SNS) topics that you want the action to send a test
notification to.
"""
uri = '/2012-09-25/roleTests'
params = {}
if role is not None:
params['Role'] = role
if input_bucket is not None:
params['InputBucket'] = input_bucket
if output_bucket is not None:
params['OutputBucket'] = output_bucket
if topics is not None:
params['Topics'] = topics
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline(self, id, name=None, input_bucket=None, role=None,
notifications=None, content_config=None,
thumbnail_config=None):
"""
:type id: string
:param id:
:type name: string
:param name:
:type input_bucket: string
:param input_bucket:
:type role: string
:param role:
:type notifications: dict
:param notifications:
:type content_config: dict
:param content_config:
:type thumbnail_config: dict
:param thumbnail_config:
"""
uri = '/2012-09-25/pipelines/{0}'.format(id)
params = {}
if name is not None:
params['Name'] = name
if input_bucket is not None:
params['InputBucket'] = input_bucket
if role is not None:
params['Role'] = role
if notifications is not None:
params['Notifications'] = notifications
if content_config is not None:
params['ContentConfig'] = content_config
if thumbnail_config is not None:
params['ThumbnailConfig'] = thumbnail_config
return self.make_request('PUT', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_notifications(self, id=None, notifications=None):
"""
To update Amazon Simple Notification Service (Amazon SNS)
notifications for a pipeline, send a POST request to the
`/2012-09-25/pipelines/ [pipelineId] /notifications` resource.
When you update notifications for a pipeline, Elastic
Transcoder returns the values that you specified in the
request.
:type id: string
:param id: The identifier of the pipeline for which you want to change
notification settings.
:type notifications: dict
:param notifications:
The topic ARN for the Amazon Simple Notification Service (Amazon SNS)
topic that you want to notify to report job status.
To receive notifications, you must also subscribe to the new topic in
the Amazon SNS console.
+ **Progressing**: The topic ARN for the Amazon Simple Notification
Service (Amazon SNS) topic that you want to notify when Elastic
Transcoder has started to process jobs that are added to this
pipeline. This is the ARN that Amazon SNS returned when you created
the topic.
+ **Completed**: The topic ARN for the Amazon SNS topic that you want
to notify when Elastic Transcoder has finished processing a job.
This is the ARN that Amazon SNS returned when you created the
topic.
+ **Warning**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters a warning condition. This
is the ARN that Amazon SNS returned when you created the topic.
+ **Error**: The topic ARN for the Amazon SNS topic that you want to
notify when Elastic Transcoder encounters an error condition. This
is the ARN that Amazon SNS returned when you created the topic.
"""
uri = '/2012-09-25/pipelines/{0}/notifications'.format(id)
params = {}
if id is not None:
params['Id'] = id
if notifications is not None:
params['Notifications'] = notifications
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def update_pipeline_status(self, id=None, status=None):
"""
To pause or reactivate a pipeline, so the pipeline stops or
restarts processing jobs, update the status for the pipeline.
Send a POST request to the `/2012-09-25/pipelines/
[pipelineId] /status` resource.
Changing the pipeline status is useful if you want to cancel
one or more jobs. You can't cancel jobs after Elastic
Transcoder has started processing them; if you pause the
pipeline to which you submitted the jobs, you have more time
to get the job IDs for the jobs that you want to cancel, and
to send a CancelJob request.
:type id: string
:param id: The identifier of the pipeline to update.
:type status: string
:param status:
The desired status of the pipeline:
+ `Active`: The pipeline is processing jobs.
+ `Paused`: The pipeline is not currently processing jobs.
"""
uri = '/2012-09-25/pipelines/{0}/status'.format(id)
params = {}
if id is not None:
params['Id'] = id
if status is not None:
params['Status'] = status
return self.make_request('POST', uri, expected_status=200,
data=json.dumps(params))
def make_request(self, verb, resource, headers=None, data='',
expected_status=None, params=None):
if headers is None:
headers = {}
response = AWSAuthConnection.make_request(
self, verb, resource, headers=headers, data=data)
body = json.load(response)
if response.status == expected_status:
return body
else:
error_type = response.getheader('x-amzn-ErrorType').split(':')[0]
error_class = self._faults.get(error_type, self.ResponseError)
raise error_class(response.status, response.reason, body)
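# Illustrative usage sketch (not part of boto): how a caller might submit a
# transcoding job through this connection class. The pipeline id, preset id,
# and object keys below are hypothetical placeholders.
def _example_create_job(): # pragma: no cover
    connection = ElasticTranscoderConnection()
    return connection.create_job(
        pipeline_id='1111111111111-abcde1',
        input_name={'Key': 'inputs/source.mov'},
        outputs=[{'Key': 'outputs/source-web.mp4',
                  'PresetId': '1351620000001-000010'}],
        output_key_prefix='transcoded/')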
| mit |
tseaver/gcloud-python | monitoring/google/cloud/monitoring_v3/__init__.py | 3 | 2208 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.monitoring_v3 import types
from google.cloud.monitoring_v3.gapic import alert_policy_service_client
from google.cloud.monitoring_v3.gapic import enums
from google.cloud.monitoring_v3.gapic import group_service_client
from google.cloud.monitoring_v3.gapic import metric_service_client
from google.cloud.monitoring_v3.gapic import (
notification_channel_service_client as notification_client)
from google.cloud.monitoring_v3.gapic import uptime_check_service_client
class AlertPolicyServiceClient(
alert_policy_service_client.AlertPolicyServiceClient):
__doc__ = alert_policy_service_client.AlertPolicyServiceClient.__doc__
enums = enums
class GroupServiceClient(group_service_client.GroupServiceClient):
__doc__ = group_service_client.GroupServiceClient.__doc__
enums = enums
class MetricServiceClient(metric_service_client.MetricServiceClient):
__doc__ = metric_service_client.MetricServiceClient.__doc__
enums = enums
class NotificationChannelServiceClient(
notification_client.NotificationChannelServiceClient):
__doc__ = notification_client.NotificationChannelServiceClient.__doc__
enums = enums
class UptimeCheckServiceClient(
uptime_check_service_client.UptimeCheckServiceClient):
__doc__ = uptime_check_service_client.UptimeCheckServiceClient.__doc__
enums = enums
__all__ = (
'enums',
'types',
'AlertPolicyServiceClient',
'GroupServiceClient',
'MetricServiceClient',
'NotificationChannelServiceClient',
'UptimeCheckServiceClient',
)
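# Illustrative only (not part of this package): the wrapper classes above are
# constructed exactly like the underlying GAPIC clients, with credentials
# resolved from the environment by default; the GAPIC enums are also exposed
# on each class.
def _example_metric_client(): # pragma: no cover
    client = MetricServiceClient()
    return client, client.enums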
| apache-2.0 |
abusse/cinder | cinder/volume/drivers/ibm/storwize_svc/__init__.py | 2 | 51024 | # Copyright 2013 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume driver for IBM Storwize family and SVC storage systems.
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
-grainsize 256 -warning 0". These can be changed in the configuration
   file or by using volume types (recommended only for advanced users).
Limitations:
1. The driver expects CLI output in English; error messages may be in a
localized format.
2. Clones and creating volumes from snapshots, where the source and target
are of different sizes, is not supported.
"""
import math
import time
from oslo_config import cfg
from oslo_utils import excutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
from cinder.volume.drivers.ibm.storwize_svc import replication as storwize_rep
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
storwize_svc_opts = [
cfg.StrOpt('storwize_svc_volpool_name',
default='volpool',
help='Storage system storage pool for volumes'),
cfg.IntOpt('storwize_svc_vol_rsize',
default=2,
help='Storage system space-efficiency parameter for volumes '
'(percentage)'),
cfg.IntOpt('storwize_svc_vol_warning',
default=0,
help='Storage system threshold for volume capacity warnings '
'(percentage)'),
cfg.BoolOpt('storwize_svc_vol_autoexpand',
default=True,
help='Storage system autoexpand parameter for volumes '
'(True/False)'),
cfg.IntOpt('storwize_svc_vol_grainsize',
default=256,
help='Storage system grain size parameter for volumes '
'(32/64/128/256)'),
cfg.BoolOpt('storwize_svc_vol_compression',
default=False,
help='Storage system compression option for volumes'),
cfg.BoolOpt('storwize_svc_vol_easytier',
default=True,
help='Enable Easy Tier for volumes'),
cfg.IntOpt('storwize_svc_vol_iogrp',
default=0,
help='The I/O group in which to allocate volumes'),
cfg.IntOpt('storwize_svc_flashcopy_timeout',
default=120,
help='Maximum number of seconds to wait for FlashCopy to be '
'prepared. Maximum value is 600 seconds (10 minutes)'),
cfg.StrOpt('storwize_svc_connection_protocol',
default='iSCSI',
help='Connection protocol (iSCSI/FC)'),
cfg.BoolOpt('storwize_svc_iscsi_chap_enabled',
default=True,
help='Configure CHAP authentication for iSCSI connections '
'(Default: Enabled)'),
cfg.BoolOpt('storwize_svc_multipath_enabled',
default=False,
help='Connect with multipath (FC only; iSCSI multipath is '
'controlled by Nova)'),
cfg.BoolOpt('storwize_svc_multihostmap_enabled',
default=True,
help='Allows vdisk to multi host mapping'),
cfg.BoolOpt('storwize_svc_npiv_compatibility_mode',
default=False,
help='Indicate whether svc driver is compatible for NPIV '
'setup. If it is compatible, it will allow no wwpns '
'being returned on get_conn_fc_wwpns during '
'initialize_connection'),
cfg.BoolOpt('storwize_svc_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('storwize_svc_stretched_cluster_partner',
default=None,
help='If operating in stretched cluster mode, specify the '
                    'name of the pool in which mirrored copies are stored. '
'Example: "pool2"'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_opts)
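# Illustrative only: a minimal cinder.conf backend section for this driver,
# expressed here as a dict for readability. All values are hypothetical
# placeholders, not recommendations.
_EXAMPLE_BACKEND_CONF = {
    'volume_driver':
        'cinder.volume.drivers.ibm.storwize_svc.StorwizeSVCDriver',
    'san_ip': '192.0.2.10',
    'san_login': 'superuser',
    'san_private_key': '/etc/cinder/storwize_rsa',
    'storwize_svc_volpool_name': 'volpool',
    'storwize_svc_connection_protocol': 'iSCSI',
}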
class StorwizeSVCDriver(san.SanDriver):
"""IBM Storwize V7000 and SVC iSCSI/FC volume driver.
Version history:
1.0 - Initial driver
1.1 - FC support, create_cloned_volume, volume type support,
get_volume_stats, minor bug fixes
1.2.0 - Added retype
1.2.1 - Code refactor, improved exception handling
1.2.2 - Fix bug #1274123 (races in host-related functions)
1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim to
lsfabric, clear unused data from connections, ensure matching
                WWPNs by comparing lower case)
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
1.3.1 - Added support for volume replication
1.3.2 - Added support for consistency group
"""
VERSION = "1.3.2"
VDISKCOPYOPS_INTERVAL = 600
def __init__(self, *args, **kwargs):
super(StorwizeSVCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(storwize_svc_opts)
self._helpers = storwize_helpers.StorwizeHelpers(self._run_ssh)
self._vdiskcopyops = {}
self._vdiskcopyops_loop = None
self.replication = None
self._state = {'storage_nodes': {},
'enabled_protocols': set(),
'compression_enabled': False,
'available_iogrps': [],
'system_name': None,
'system_id': None,
'code_level': None,
}
        # Storwize has the limitation that it cannot accept more than 3 new
        # ssh connections within 1 second, so slow down the initialization.
time.sleep(1)
def do_setup(self, ctxt):
"""Check that we have all configuration details from the storage."""
LOG.debug('enter: do_setup')
# Get storage system name, id, and code level
self._state.update(self._helpers.get_system_info())
# Get the replication helpers
self.replication = storwize_rep.StorwizeSVCReplication.factory(self)
# Validate that the pool exists
pool = self.configuration.storwize_svc_volpool_name
try:
self._helpers.get_pool_attrs(pool)
except exception.VolumeBackendAPIException:
msg = _('Failed getting details for pool %s') % pool
raise exception.InvalidInput(reason=msg)
# Check if compression is supported
self._state['compression_enabled'] = \
self._helpers.compression_enabled()
# Get the available I/O groups
self._state['available_iogrps'] = \
self._helpers.get_available_io_groups()
# Get the iSCSI and FC names of the Storwize/SVC nodes
self._state['storage_nodes'] = self._helpers.get_node_info()
# Add the iSCSI IP addresses and WWPNs to the storage node info
self._helpers.add_iscsi_ip_addrs(self._state['storage_nodes'])
self._helpers.add_fc_wwpns(self._state['storage_nodes'])
# For each node, check what connection modes it supports. Delete any
# nodes that do not support any types (may be partially configured).
to_delete = []
for k, node in self._state['storage_nodes'].iteritems():
if ((len(node['ipv4']) or len(node['ipv6']))
and len(node['iscsi_name'])):
node['enabled_protocols'].append('iSCSI')
self._state['enabled_protocols'].add('iSCSI')
if len(node['WWPN']):
node['enabled_protocols'].append('FC')
self._state['enabled_protocols'].add('FC')
if not len(node['enabled_protocols']):
to_delete.append(k)
for delkey in to_delete:
del self._state['storage_nodes'][delkey]
# Make sure we have at least one node configured
if not len(self._state['storage_nodes']):
msg = _('do_setup: No configured nodes.')
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Build the list of in-progress vdisk copy operations
if ctxt is None:
admin_context = context.get_admin_context()
else:
admin_context = ctxt.elevated()
volumes = self.db.volume_get_all_by_host(admin_context, self.host)
for volume in volumes:
metadata = self.db.volume_admin_metadata_get(admin_context,
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if curr_ops:
ops = [tuple(x.split(':')) for x in curr_ops.split(';')]
self._vdiskcopyops[volume['id']] = ops
# if vdiskcopy exists in database, start the looping call
if len(self._vdiskcopyops) >= 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
LOG.debug('leave: do_setup')
def check_for_setup_error(self):
"""Ensure that the flags are set properly."""
LOG.debug('enter: check_for_setup_error')
# Check that we have the system ID information
if self._state['system_name'] is None:
exception_msg = (_('Unable to determine system name'))
raise exception.VolumeBackendAPIException(data=exception_msg)
if self._state['system_id'] is None:
exception_msg = (_('Unable to determine system id'))
raise exception.VolumeBackendAPIException(data=exception_msg)
required_flags = ['san_ip', 'san_ssh_port', 'san_login',
'storwize_svc_volpool_name']
for flag in required_flags:
if not self.configuration.safe_get(flag):
raise exception.InvalidInput(reason=_('%s is not set') % flag)
# Ensure that either password or keyfile were set
if not (self.configuration.san_password or
self.configuration.san_private_key):
raise exception.InvalidInput(
reason=_('Password or SSH private key is required for '
'authentication: set either san_password or '
'san_private_key option'))
# Check that flashcopy_timeout is not more than 10 minutes
flashcopy_timeout = self.configuration.storwize_svc_flashcopy_timeout
if not (flashcopy_timeout > 0 and flashcopy_timeout <= 600):
raise exception.InvalidInput(
reason=_('Illegal value %d specified for '
'storwize_svc_flashcopy_timeout: '
'valid values are between 0 and 600')
% flashcopy_timeout)
opts = self._helpers.build_default_opts(self.configuration)
self._helpers.check_vdisk_opts(self._state, opts)
LOG.debug('leave: check_for_setup_error')
def ensure_export(self, ctxt, volume):
"""Check that the volume exists on the storage.
The system does not "export" volumes as a Linux iSCSI target does,
and therefore we just check that the volume exists on the storage.
"""
volume_defined = self._helpers.is_vdisk_defined(volume['name'])
if not volume_defined:
LOG.error(_LE('ensure_export: Volume %s not found on storage')
% volume['name'])
def create_export(self, ctxt, volume):
model_update = None
return model_update
def remove_export(self, ctxt, volume):
pass
def validate_connector(self, connector):
"""Check connector for at least one enabled protocol (iSCSI/FC)."""
valid = False
if ('iSCSI' in self._state['enabled_protocols'] and
'initiator' in connector):
valid = True
if 'FC' in self._state['enabled_protocols'] and 'wwpns' in connector:
valid = True
if not valid:
msg = (_LE('The connector does not contain the required '
'information.'))
LOG.error(msg)
raise exception.InvalidConnectorException(
missing='initiator or wwpns')
def _get_vdisk_params(self, type_id, volume_type=None,
volume_metadata=None):
return self._helpers.get_vdisk_params(self.configuration, self._state,
type_id, volume_type=volume_type,
volume_metadata=volume_metadata)
@fczm_utils.AddFCZone
@utils.synchronized('storwize-host', external=True)
def initialize_connection(self, volume, connector):
"""Perform the necessary work so that an iSCSI/FC connection can
be made.
To be able to create an iSCSI/FC connection from a given host to a
volume, we must:
1. Translate the given iSCSI name or WWNN to a host name
2. Create new host on the storage system if it does not yet exist
3. Map the volume to the host if it is not already done
4. Return the connection information for relevant nodes (in the
proper I/O group)
"""
LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
' %(conn)s', {'vol': volume['id'], 'conn': connector})
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
volume_name = volume['name']
# Delete irrelevant connection information that later could result
# in unwanted behaviour. For example, if FC is used yet the hosts
# return iSCSI data, the driver will try to create the iSCSI connection
# which can result in a nice error about reaching the per-host maximum
# iSCSI initiator limit.
# First make a copy so we don't mess with a caller's connector.
connector = connector.copy()
if vol_opts['protocol'] == 'FC':
connector.pop('initiator', None)
elif vol_opts['protocol'] == 'iSCSI':
connector.pop('wwnns', None)
connector.pop('wwpns', None)
# Check if a host object is defined for this host name
host_name = self._helpers.get_host_from_connector(connector)
if host_name is None:
# Host does not exist - add a new host to Storwize/SVC
host_name = self._helpers.create_host(connector)
if vol_opts['protocol'] == 'iSCSI':
chap_secret = self._helpers.get_chap_secret_for_host(host_name)
chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
if chap_enabled and chap_secret is None:
chap_secret = self._helpers.add_chap_secret_to_host(host_name)
elif not chap_enabled and chap_secret:
LOG.warning(_LW('CHAP secret exists for host but CHAP is '
'disabled'))
volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
if volume_attributes is None:
msg = (_('initialize_connection: Failed to get attributes'
' for volume %s') % volume_name)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
multihostmap = self.configuration.storwize_svc_multihostmap_enabled
lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
multihostmap)
try:
preferred_node = volume_attributes['preferred_node_id']
IO_group = volume_attributes['IO_group_id']
except KeyError as e:
LOG.error(_LE('Did not find expected column name in '
'lsvdisk: %s') % e)
msg = (_('initialize_connection: Missing volume '
'attribute for volume %s') % volume_name)
raise exception.VolumeBackendAPIException(data=msg)
try:
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
for node in self._state['storage_nodes'].itervalues():
if vol_opts['protocol'] not in node['enabled_protocols']:
continue
if node['id'] == preferred_node:
preferred_node_entry = node
if node['IO_group'] == IO_group:
io_group_nodes.append(node)
if not len(io_group_nodes):
msg = (_('initialize_connection: No node found in '
'I/O group %(gid)s for volume %(vol)s') %
{'gid': IO_group, 'vol': volume_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not preferred_node_entry and not vol_opts['multipath']:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
LOG.warn(_LW('initialize_connection: Did not find a preferred '
'node for volume %s') % volume_name)
properties = {}
properties['target_discovered'] = False
properties['target_lun'] = lun_id
properties['volume_id'] = volume['id']
if vol_opts['protocol'] == 'iSCSI':
type_str = 'iscsi'
if len(preferred_node_entry['ipv4']):
ipaddr = preferred_node_entry['ipv4'][0]
else:
ipaddr = preferred_node_entry['ipv6'][0]
properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
properties['target_iqn'] = preferred_node_entry['iscsi_name']
if chap_secret:
properties['auth_method'] = 'CHAP'
properties['auth_username'] = connector['initiator']
properties['auth_password'] = chap_secret
properties['discovery_auth_method'] = 'CHAP'
properties['discovery_auth_username'] = (
connector['initiator'])
properties['discovery_auth_password'] = chap_secret
else:
type_str = 'fibre_channel'
conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
# If conn_wwpns is empty, then that means that there were
# no target ports with visibility to any of the initiators.
# We will either fail the attach, or return all target
# ports, depending on the value of the
            # storwize_svc_npiv_compatibility_mode flag.
if len(conn_wwpns) == 0:
npiv_compat = self.configuration.\
storwize_svc_npiv_compatibility_mode
if not npiv_compat:
msg = (_('Could not get FC connection information for '
'the host-volume connection. Is the host '
'configured properly for FC connections?'))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
for node in self._state['storage_nodes'].itervalues():
conn_wwpns.extend(node['WWPN'])
if not vol_opts['multipath']:
# preferred_node_entry can have a list of WWPNs while only
# one WWPN may be available on the storage host. Here we
# walk through the nodes until we find one that works,
# default to the first WWPN otherwise.
for WWPN in preferred_node_entry['WWPN']:
if WWPN in conn_wwpns:
properties['target_wwn'] = WWPN
break
else:
LOG.warning(_LW('Unable to find a preferred node match'
' for node %(node)s in the list of '
'available WWPNs on %(host)s. '
'Using first available.') %
{'node': preferred_node,
'host': host_name})
properties['target_wwn'] = conn_wwpns[0]
else:
properties['target_wwn'] = conn_wwpns
i_t_map = self._make_initiator_target_map(connector['wwpns'],
conn_wwpns)
properties['initiator_target_map'] = i_t_map
# specific for z/VM, refer to cinder bug 1323993
if "zvm_fcp" in connector:
properties['zvm_fcp'] = connector['zvm_fcp']
except Exception:
with excutils.save_and_reraise_exception():
self.terminate_connection(volume, connector)
LOG.error(_LE('initialize_connection: Failed '
'to collect return '
'properties for volume %(vol)s and connector '
'%(conn)s.\n'), {'vol': volume,
'conn': connector})
LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
'connector %(conn)s\n properties: %(prop)s',
{'vol': volume['id'], 'conn': connector,
'prop': properties})
return {'driver_volume_type': type_str, 'data': properties, }
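    # Illustrative shape of the dict returned above for the iSCSI case; all
    # values are hypothetical placeholders:
    #   {'driver_volume_type': 'iscsi',
    #    'data': {'target_portal': '198.51.100.5:3260',
    #             'target_iqn': 'iqn.1986-03.com.ibm:2145.cluster.node1',
    #             'target_lun': 0,
    #             'target_discovered': False,
    #             'volume_id': '<volume uuid>',
    #             'auth_method': 'CHAP'}}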
def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
"""Build a simplistic all-to-all mapping."""
i_t_map = {}
for i_wwpn in initiator_wwpns:
i_t_map[str(i_wwpn)] = []
for t_wwpn in target_wwpns:
i_t_map[i_wwpn].append(t_wwpn)
return i_t_map
@fczm_utils.RemoveFCZone
@utils.synchronized('storwize-host', external=True)
def terminate_connection(self, volume, connector, **kwargs):
"""Cleanup after an iSCSI connection has been terminated.
When we clean up a terminated connection between a given connector
and volume, we:
1. Translate the given connector to a host name
2. Remove the volume-to-host mapping if it exists
3. Delete the host if it has no more mappings (hosts are created
automatically by this driver when mappings are created)
"""
LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
' %(conn)s', {'vol': volume['id'], 'conn': connector})
vol_name = volume['name']
if 'host' in connector:
# maybe two hosts on the storage, one is for FC and the other for
# iSCSI, so get host according to protocol
vol_opts = self._get_vdisk_params(volume['volume_type_id'])
connector = connector.copy()
if vol_opts['protocol'] == 'FC':
connector.pop('initiator', None)
elif vol_opts['protocol'] == 'iSCSI':
connector.pop('wwnns', None)
connector.pop('wwpns', None)
host_name = self._helpers.get_host_from_connector(connector)
if host_name is None:
msg = (_('terminate_connection: Failed to get host name from'
' connector.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
# See bug #1244257
host_name = None
info = {}
if 'wwpns' in connector and host_name:
target_wwpns = self._helpers.get_conn_fc_wwpns(host_name)
init_targ_map = self._make_initiator_target_map(connector['wwpns'],
target_wwpns)
info = {'driver_volume_type': 'fibre_channel',
'data': {'initiator_target_map': init_targ_map}}
self._helpers.unmap_vol_from_host(vol_name, host_name)
LOG.debug('leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s', {'vol': volume['id'],
'conn': connector})
return info
def create_volume(self, volume):
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
pool = self.configuration.storwize_svc_volpool_name
self._helpers.create_vdisk(volume['name'], str(volume['size']),
'gb', pool, opts)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
model_update = None
if 'replication' in opts and opts['replication']:
ctxt = context.get_admin_context()
model_update = self.replication.create_replica(ctxt, volume)
return model_update
def delete_volume(self, volume):
self._helpers.delete_vdisk(volume['name'], False)
if volume['id'] in self._vdiskcopyops:
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
def create_snapshot(self, snapshot):
ctxt = context.get_admin_context()
try:
source_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
except Exception:
msg = (_('create_snapshot: get source volume failed.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
opts = self._get_vdisk_params(source_vol['volume_type_id'])
self._helpers.create_copy(snapshot['volume_name'], snapshot['name'],
snapshot['volume_id'], self.configuration,
opts, False)
def delete_snapshot(self, snapshot):
self._helpers.delete_vdisk(snapshot['name'], False)
def create_volume_from_snapshot(self, volume, snapshot):
if volume['size'] != snapshot['volume_size']:
msg = (_('create_volume_from_snapshot: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
volume.get('volume_metadata'))
self._helpers.create_copy(snapshot['name'], volume['name'],
snapshot['id'], self.configuration,
opts, True)
if opts['qos']:
self._helpers.add_vdisk_qos(volume['name'], opts['qos'])
if 'replication' in opts and opts['replication']:
ctxt = context.get_admin_context()
replica_status = self.replication.create_replica(ctxt, volume)
if replica_status:
return replica_status
def create_cloned_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
opts = self._get_vdisk_params(tgt_volume['volume_type_id'],
volume_metadata=
tgt_volume.get('volume_metadata'))
self._helpers.create_copy(src_volume['name'], tgt_volume['name'],
src_volume['id'], self.configuration,
opts, True)
if opts['qos']:
self._helpers.add_vdisk_qos(tgt_volume['name'], opts['qos'])
if 'replication' in opts and opts['replication']:
ctxt = context.get_admin_context()
replica_status = self.replication.create_replica(ctxt, tgt_volume)
if replica_status:
return replica_status
def extend_volume(self, volume, new_size):
LOG.debug('enter: extend_volume: volume %s' % volume['id'])
ret = self._helpers.ensure_vdisk_no_fc_mappings(volume['name'],
allow_snaps=False)
if not ret:
msg = (_('extend_volume: Extending a volume with snapshots is not '
'supported.'))
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
extend_amt = int(new_size) - volume['size']
self._helpers.extend_vdisk(volume['name'], extend_amt)
LOG.debug('leave: extend_volume: volume %s' % volume['id'])
def add_vdisk_copy(self, volume, dest_pool, vol_type):
return self._helpers.add_vdisk_copy(volume, dest_pool,
vol_type, self._state,
self.configuration)
def _add_vdisk_copy_op(self, ctxt, volume, new_op):
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
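        # 'vdiskcopyops' is stored as a flat string of copy-id pairs, e.g.
        # '0:1;0:2' for the (orig_copy_id, new_copy_id) tuples ('0', '1') and
        # ('0', '2'); the ids here are illustrative only.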
if curr_ops:
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
            curr_ops_list.append(new_op)
            new_ops_list = curr_ops_list
else:
new_ops_list = [new_op]
new_ops_str = ';'.join([':'.join(x) for x in new_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
if volume['id'] in self._vdiskcopyops:
self._vdiskcopyops[volume['id']].append(new_op)
else:
self._vdiskcopyops[volume['id']] = [new_op]
# We added the first copy operation, so start the looping call
if len(self._vdiskcopyops) == 1:
self._vdiskcopyops_loop = loopingcall.FixedIntervalLoopingCall(
self._check_volume_copy_ops)
self._vdiskcopyops_loop.start(interval=self.VDISKCOPYOPS_INTERVAL)
def _rm_vdisk_copy_op(self, ctxt, volume, orig_copy_id, new_copy_id):
try:
self._vdiskcopyops[volume['id']].remove((orig_copy_id,
new_copy_id))
if not len(self._vdiskcopyops[volume['id']]):
del self._vdiskcopyops[volume['id']]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
except KeyError:
msg = (_('_rm_vdisk_copy_op: Volume %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
return
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s does not have the '
'specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
return
metadata = self.db.volume_admin_metadata_get(ctxt.elevated(),
volume['id'])
curr_ops = metadata.get('vdiskcopyops', None)
if not curr_ops:
msg = (_('_rm_vdisk_copy_op: Volume metadata %s does not have any '
'registered vdisk copy operations.') % volume['id'])
LOG.error(msg)
return
curr_ops_list = [tuple(x.split(':')) for x in curr_ops.split(';')]
try:
curr_ops_list.remove((orig_copy_id, new_copy_id))
except ValueError:
msg = (_('_rm_vdisk_copy_op: Volume %(vol)s metadata does not '
'have the specified vdisk copy operation: orig=%(orig)s '
'new=%(new)s.')
% {'vol': volume['id'], 'orig': orig_copy_id,
'new': new_copy_id})
LOG.error(msg)
return
if len(curr_ops_list):
new_ops_str = ';'.join([':'.join(x) for x in curr_ops_list])
self.db.volume_admin_metadata_update(ctxt.elevated(), volume['id'],
{'vdiskcopyops': new_ops_str},
False)
else:
self.db.volume_admin_metadata_delete(ctxt.elevated(), volume['id'],
'vdiskcopyops')
def promote_replica(self, ctxt, volume):
return self.replication.promote_replica(volume)
def reenable_replication(self, ctxt, volume):
return self.replication.reenable_replication(volume)
def create_replica_test_volume(self, tgt_volume, src_volume):
if src_volume['size'] != tgt_volume['size']:
msg = (_('create_cloned_volume: Source and destination '
'size differ.'))
LOG.error(msg)
raise exception.InvalidInput(message=msg)
replica_status = self.replication.test_replica(tgt_volume,
src_volume)
return replica_status
def get_replication_status(self, ctxt, volume):
replica_status = None
if self.replication:
replica_status = self.replication.get_replication_status(volume)
return replica_status
def _check_volume_copy_ops(self):
LOG.debug("enter: update volume copy status")
ctxt = context.get_admin_context()
copy_items = self._vdiskcopyops.items()
for vol_id, copy_ops in copy_items:
try:
volume = self.db.volume_get(ctxt, vol_id)
except Exception:
LOG.warn(_LW('Volume %s does not exist.'), vol_id)
del self._vdiskcopyops[vol_id]
if not len(self._vdiskcopyops):
self._vdiskcopyops_loop.stop()
self._vdiskcopyops_loop = None
continue
for copy_op in copy_ops:
try:
synced = self._helpers.is_vdisk_copy_synced(volume['name'],
copy_op[1])
except Exception:
msg = (_('_check_volume_copy_ops: Volume %(vol)s does not '
'have the specified vdisk copy operation: '
'orig=%(orig)s new=%(new)s.')
% {'vol': volume['id'], 'orig': copy_op[0],
'new': copy_op[1]})
LOG.info(msg)
else:
if synced:
self._helpers.rm_vdisk_copy(volume['name'], copy_op[0])
self._rm_vdisk_copy_op(ctxt, volume, copy_op[0],
copy_op[1])
LOG.debug("exit: update volume copy status")
def migrate_volume(self, ctxt, volume, host):
"""Migrate directly if source and dest are managed by same storage.
We create a new vdisk copy in the desired pool, and add the original
vdisk copy to the admin_metadata of the volume to be deleted. The
deletion will occur using a periodic task once the new copy is synced.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
LOG.debug('enter: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host['host']})
false_ret = (False, None)
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return false_ret
ctxt = context.get_admin_context()
if volume['volume_type_id'] is not None:
volume_type_id = volume['volume_type_id']
vol_type = volume_types.get_volume_type(ctxt, volume_type_id)
else:
vol_type = None
self._check_volume_copy_ops()
new_op = self.add_vdisk_copy(volume['name'], dest_pool, vol_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
LOG.debug('leave: migrate_volume: id=%(id)s, host=%(host)s' %
{'id': volume['id'], 'host': host['host']})
return (True, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities.
"""
def retype_iogrp_property(volume, new, old):
if new != old:
self._helpers.change_vdisk_iogrp(volume['name'],
self._state, (new, old))
LOG.debug('enter: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host})
ignore_keys = ['protocol', 'multipath']
no_copy_keys = ['warning', 'autoexpand', 'easytier']
copy_keys = ['rsize', 'grainsize', 'compression']
all_keys = ignore_keys + no_copy_keys + copy_keys
old_opts = self._get_vdisk_params(volume['volume_type_id'],
volume_metadata=
                                          volume.get('volume_metadata'))
new_opts = self._get_vdisk_params(new_type['id'],
volume_type=new_type)
# Check if retype affects volume replication
model_update = None
old_type_replication = old_opts.get('replication', False)
new_type_replication = new_opts.get('replication', False)
# Delete replica if needed
if old_type_replication and not new_type_replication:
self.replication.delete_replica(volume)
model_update = {'replication_status': 'disabled',
'replication_driver_data': None,
'replication_extended_status': None}
vdisk_changes = []
need_copy = False
for key in all_keys:
if old_opts[key] != new_opts[key]:
if key in copy_keys:
need_copy = True
break
elif key in no_copy_keys:
vdisk_changes.append(key)
dest_location = host['capabilities'].get('location_info')
if self._stats['location_info'] != dest_location:
need_copy = True
if need_copy:
self._check_volume_copy_ops()
dest_pool = self._helpers.can_migrate_to_host(host, self._state)
if dest_pool is None:
return False
# If volume is replicated, can't copy
if new_type_replication:
msg = (_('Unable to retype: Current action needs volume-copy,'
' it is not allowed when new type is replication.'
                         ' Volume = %s') % volume['id'])
raise exception.VolumeDriverException(message=msg)
retype_iogrp_property(volume,
new_opts['iogrp'],
old_opts['iogrp'])
try:
new_op = self.add_vdisk_copy(volume['name'],
dest_pool,
new_type)
self._add_vdisk_copy_op(ctxt, volume, new_op)
except exception.VolumeDriverException:
# roll back changing iogrp property
retype_iogrp_property(volume, old_opts['iogrp'],
new_opts['iogrp'])
msg = (_('Unable to retype: A copy of volume %s exists. '
                         'Retyping would exceed the limit of 2 copies.') %
                       volume['id'])
raise exception.VolumeDriverException(message=msg)
else:
retype_iogrp_property(volume, new_opts['iogrp'], old_opts['iogrp'])
self._helpers.change_vdisk_options(volume['name'], vdisk_changes,
new_opts, self._state)
if new_opts['qos']:
# Add the new QoS setting to the volume. If the volume has an
# old QoS setting, it will be overwritten.
self._helpers.update_vdisk_qos(volume['name'], new_opts['qos'])
elif old_opts['qos']:
# If the old_opts contain QoS keys, disable them.
self._helpers.disable_vdisk_qos(volume['name'], old_opts['qos'])
# Add replica if needed
if not old_type_replication and new_type_replication:
model_update = self.replication.create_replica(ctxt, volume,
new_type)
        LOG.debug('exit: retype: id=%(id)s, new_type=%(new_type)s,'
'diff=%(diff)s, host=%(host)s' % {'id': volume['id'],
'new_type': new_type,
'diff': diff,
'host': host['host']})
return True, model_update
def manage_existing(self, volume, ref):
"""Manages an existing vdisk.
Renames the vdisk to match the expected name for the volume.
Error checking done by manage_existing_get_size is not repeated -
if we got here then we have a vdisk that isn't in use (or we don't
        care if it is in use).
"""
vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
if vdisk is None:
reason = (_('No vdisk with the UID specified by source-id %s.')
% ref['source-id'])
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
self._helpers.rename_vdisk(vdisk['name'], volume['name'])
def manage_existing_get_size(self, volume, ref):
"""Return size of an existing Vdisk for manage_existing.
existing_ref is a dictionary of the form:
{'source-id': <uid of disk>}
Optional elements are:
'manage_if_in_use': True/False (default is False)
If set to True, a volume will be managed even if it is currently
attached to a host system.
"""
# Check that the reference is valid
if 'source-id' not in ref:
reason = _('Reference must contain source-id element.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check for existence of the vdisk
vdisk = self._helpers.vdisk_by_uid(ref['source-id'])
if vdisk is None:
reason = (_('No vdisk with the UID specified by source-id %s.')
% (ref['source-id']))
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
# Check if the disk is in use, if we need to.
manage_if_in_use = ref.get('manage_if_in_use', False)
if (not manage_if_in_use and
self._helpers.is_vdisk_in_use(vdisk['name'])):
reason = _('The specified vdisk is mapped to a host.')
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
return int(math.ceil(float(vdisk['capacity']) / units.Gi))
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If we haven't gotten stats yet or 'refresh' is True,
        update the stats first.
"""
if not self._stats or refresh:
self._update_volume_stats()
return self._stats
def create_consistencygroup(self, context, group):
"""Create a consistency group.
        IBM Storwize defers CG creation until cg-snapshot creation;
        the db maintains the volumes and CG relationship.
"""
LOG.debug("Creating consistency group")
model_update = {'status': 'available'}
return model_update
def delete_consistencygroup(self, context, group):
"""Deletes a consistency group.
IBM Storwize will delete the volumes of the CG.
"""
LOG.debug("deleting consistency group")
model_update = {}
model_update['status'] = 'deleted'
volumes = self.db.volume_get_all_by_group(context, group['id'])
for volume in volumes:
try:
self._helpers.delete_vdisk(volume['name'], True)
volume['status'] = 'deleted'
except exception.VolumeBackendAPIException as err:
volume['status'] = 'error_deleting'
if model_update['status'] != 'error_deleting':
model_update['status'] = 'error_deleting'
LOG.error(_LE("Failed to delete the volume %(vol)s of CG. "
"Exception: %(exception)s."),
{'vol': volume['name'], 'exception': err})
return model_update, volumes
def create_cgsnapshot(self, ctxt, cgsnapshot):
"""Creates a cgsnapshot."""
# Use cgsnapshot id as cg name
cg_name = 'cg_snap-' + cgsnapshot['id']
# Create new cg as cg_snapshot
self._helpers.create_fc_consistgrp(cg_name)
snapshots = self.db.snapshot_get_all_for_cgsnapshot(
ctxt, cgsnapshot['id'])
timeout = self.configuration.storwize_svc_flashcopy_timeout
model_update, snapshots_model = (
self._helpers.run_consistgrp_snapshots(cg_name,
snapshots,
self._state,
self.configuration,
timeout))
return model_update, snapshots_model
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes a cgsnapshot."""
cgsnapshot_id = cgsnapshot['id']
cg_name = 'cg_snap-' + cgsnapshot_id
snapshots = self.db.snapshot_get_all_for_cgsnapshot(context,
cgsnapshot_id)
model_update, snapshots_model = (
self._helpers.delete_consistgrp_snapshots(cg_name,
snapshots))
return model_update, snapshots_model
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
LOG.debug("Updating volume stats")
data = {}
data['vendor_name'] = 'IBM'
data['driver_version'] = self.VERSION
data['storage_protocol'] = list(self._state['enabled_protocols'])
data['total_capacity_gb'] = 0 # To be overwritten
data['free_capacity_gb'] = 0 # To be overwritten
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['consistencygroup_support'] = True
pool = self.configuration.storwize_svc_volpool_name
backend_name = self.configuration.safe_get('volume_backend_name')
if not backend_name:
backend_name = '%s_%s' % (self._state['system_name'], pool)
data['volume_backend_name'] = backend_name
attributes = self._helpers.get_pool_attrs(pool)
if not attributes:
LOG.error(_LE('Could not get pool data from the storage'))
exception_message = (_('_update_volume_stats: '
'Could not get storage pool data'))
raise exception.VolumeBackendAPIException(data=exception_message)
data['total_capacity_gb'] = (float(attributes['capacity']) /
units.Gi)
data['free_capacity_gb'] = (float(attributes['free_capacity']) /
units.Gi)
data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
data['compression_support'] = self._state['compression_enabled']
data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
{'sys_id': self._state['system_id'],
'pool': pool})
if self.replication:
data.update(self.replication.get_replication_info())
self._stats = data
| apache-2.0 |
akaihola/bitcoin-price | old_versions/fast_dump_v11.py | 4 | 1386 | #!/usr/bin/env python3
import requests
import sys
def get_all():
page_num = 1
price_data = ''
while True:
req = requests.get("http://coinbase.com/api/v1/prices/historical?page="+str(page_num))
if req.status_code == 200:
price_data += '\n' + req.text
else:
price_data += "API error"
print("... getting page "+str(page_num))
page_num += 1
if req.text == "":
break
return price_data
if __name__ == '__main__':
sys.stdout.write(get_all())
#with open('.tmp/{}_full_output.py'.format(int(time.time())), 'a') as f1:
# f1.write('\n'+ price_data)
#price_data_format1 = price_data.replace(',','\n')
#with open('.tmp/{}_lines_removed.py'.format(int(time.time())), 'a') as f2:
# f2.write('\n' + price_data_format1)
#price_data_format2 = price_data_format1.split('\n')
#with open('.tmp/{}_xyxy.py'.format(int(time.time())), 'a') as f3:
# f3.write(str(price_data_format2))
#prices = price_data_format2[::2]
#k=1
#with open('.tmp/{}_prices.py'.format(int(time.time())), 'a') as f4:
# while k<len(prices):
# f4.write('{!r}\n'.format(prices[k]))
# k+=1
#timestamps = price_data_format2[1::2]
#j=1
#with open('.tmp/{}_stamps.py'.format(int(time.time())), 'a') as f5:
# while j<len(timestamps):
# f5.write('{!r}\n'.format(timestamps[j]))
# j += 1
| mit |
johankaito/fufuka | microblog/flask/venv/lib/python2.7/site-packages/pip/_vendor/distlib/metadata.py | 427 | 38314 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Implementation of the Metadata for Python packages PEPs.
Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental).
"""
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP440_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
"""A required metadata is missing"""
class MetadataConflictError(DistlibException):
"""Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
"""Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX = re.compile('\n \|')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
if version == '1.0':
return _241_FIELDS
elif version == '1.1':
return _314_FIELDS
elif version == '1.2':
return _345_FIELDS
elif version == '2.0':
return _426_FIELDS
raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
"""Detect the best version depending on the fields used."""
def _has_marker(keys, markers):
for marker in markers:
if marker in keys:
return True
return False
keys = []
for key, value in fields.items():
if value in ([], 'UNKNOWN', None):
continue
keys.append(key)
possible_versions = ['1.0', '1.1', '1.2', '2.0']
    # first let's try to see if a field is not part of one of the versions
for key in keys:
if key not in _241_FIELDS and '1.0' in possible_versions:
possible_versions.remove('1.0')
if key not in _314_FIELDS and '1.1' in possible_versions:
possible_versions.remove('1.1')
if key not in _345_FIELDS and '1.2' in possible_versions:
possible_versions.remove('1.2')
if key not in _426_FIELDS and '2.0' in possible_versions:
possible_versions.remove('2.0')
    # possible_versions contains qualified versions
if len(possible_versions) == 1:
return possible_versions[0] # found !
elif len(possible_versions) == 0:
raise MetadataConflictError('Unknown metadata set')
# let's see if one unique marker is found
is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
# we have the choice, 1.0, or 1.2, or 2.0
# - 1.0 has a broken Summary field but works with all tools
    # - 1.1 is to be avoided
# - 1.2 fixes Summary but has little adoption
# - 2.0 adds more features and is very new
if not is_1_1 and not is_1_2 and not is_2_0:
# we couldn't find any specific marker
if PKG_INFO_PREFERRED_VERSION in possible_versions:
return PKG_INFO_PREFERRED_VERSION
if is_1_1:
return '1.1'
if is_1_2:
return '1.2'
return '2.0'
_ATTR2FIELD = {
'metadata_version': 'Metadata-Version',
'name': 'Name',
'version': 'Version',
'platform': 'Platform',
'supported_platform': 'Supported-Platform',
'summary': 'Summary',
'description': 'Description',
'keywords': 'Keywords',
'home_page': 'Home-page',
'author': 'Author',
'author_email': 'Author-email',
'maintainer': 'Maintainer',
'maintainer_email': 'Maintainer-email',
'license': 'License',
'classifier': 'Classifier',
'download_url': 'Download-URL',
'obsoletes_dist': 'Obsoletes-Dist',
'provides_dist': 'Provides-Dist',
'requires_dist': 'Requires-Dist',
'setup_requires_dist': 'Setup-Requires-Dist',
'requires_python': 'Requires-Python',
'requires_external': 'Requires-External',
'requires': 'Requires',
'provides': 'Provides',
'obsoletes': 'Obsoletes',
'project_url': 'Project-URL',
'private_version': 'Private-Version',
'obsoleted_by': 'Obsoleted-By',
'extension': 'Extension',
'provides_extra': 'Provides-Extra',
}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
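    # For example (illustrative values):
    #   _get_name_and_version('my project', '1.0 a1', for_filename=True)
    # returns 'my-project-1.0.a1'.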
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
return _LINE_PREFIX.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
        its keys are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
'%r: %r is not valid (field %r)',
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append('Wrong value for %r: %s' % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifier', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
METADATA_FILENAME = 'pydist.json'
class Metadata(object):
"""
The metadata of a release. This implementation uses 2.0 (JSON)
metadata where possible. If not possible, it wraps a LegacyMetadata
instance which handles the key-value metadata format.
"""
METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$')
NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
VERSION_MATCHER = PEP440_VERSION_RE
SUMMARY_MATCHER = re.compile('.{1,2047}')
METADATA_VERSION = '2.0'
GENERATOR = 'distlib (%s)' % __version__
MANDATORY_KEYS = {
'name': (),
'version': (),
'summary': ('legacy',),
}
INDEX_KEYS = ('name version license summary description author '
'author_email keywords platform home_page classifiers '
'download_url')
DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
'dev_requires provides meta_requires obsoleted_by '
'supports_environments')
SYNTAX_VALIDATORS = {
'metadata_version': (METADATA_VERSION_MATCHER, ()),
'name': (NAME_MATCHER, ('legacy',)),
'version': (VERSION_MATCHER, ('legacy',)),
'summary': (SUMMARY_MATCHER, ('legacy',)),
}
__slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
#import pdb; pdb.set_trace()
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
def __getattribute__(self, key):
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, maker = mapped[key]
if self._legacy:
if lk is None:
result = None if maker is None else maker()
else:
result = self._legacy.get(lk)
else:
value = None if maker is None else maker()
if key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
result = self._data.get(key, value)
else:
# special cases for PEP 459
sentinel = object()
result = sentinel
d = self._data.get('extensions')
if d:
if key == 'commands':
result = d.get('python.commands', value)
elif key == 'classifiers':
d = d.get('python.details')
if d:
result = d.get(key, value)
else:
d = d.get('python.exports')
if d:
result = d.get(key, value)
if result is sentinel:
result = value
elif key not in common:
result = object.__getattribute__(self, key)
elif self._legacy:
result = self._legacy.get(key)
else:
result = self._data.get(key)
return result
def _validate_value(self, key, value, scheme=None):
if key in self.SYNTAX_VALIDATORS:
pattern, exclusions = self.SYNTAX_VALIDATORS[key]
if (scheme or self.scheme) not in exclusions:
m = pattern.match(value)
if not m:
raise MetadataInvalidError('%r is an invalid value for '
'the %r property' % (value,
key))
def __setattr__(self, key, value):
self._validate_value(key, value)
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, _ = mapped[key]
if self._legacy:
if lk is None:
raise NotImplementedError
self._legacy[lk] = value
elif key not in ('commands', 'exports', 'modules', 'namespaces',
'classifiers'):
self._data[key] = value
else:
# special cases for PEP 459
d = self._data.setdefault('extensions', {})
if key == 'commands':
d['python.commands'] = value
elif key == 'classifiers':
d = d.setdefault('python.details', {})
d[key] = value
else:
d = d.setdefault('python.exports', {})
d[key] = value
elif key not in common:
object.__setattr__(self, key, value)
else:
if key == 'keywords':
if isinstance(value, string_types):
value = value.strip()
if value:
value = value.split()
else:
value = []
if self._legacy:
self._legacy[key] = value
else:
self._data[key] = value
@property
def name_and_version(self):
return _get_name_and_version(self.name, self.version, True)
@property
def provides(self):
if self._legacy:
result = self._legacy['Provides-Dist']
else:
result = self._data.setdefault('provides', [])
s = '%s (%s)' % (self.name, self.version)
if s not in result:
result.append(s)
return result
@provides.setter
def provides(self, value):
if self._legacy:
self._legacy['Provides-Dist'] = value
else:
self._data['provides'] = value
def get_requirements(self, reqts, extras=None, env=None):
"""
Base method to get dependencies, given a set of extras
to satisfy and an optional environment context.
:param reqts: A list of sometimes-wanted dependencies,
perhaps dependent on extras and environment.
:param extras: A list of optional components being requested.
:param env: An optional environment for marker evaluation.
"""
if self._legacy:
result = reqts
else:
result = []
extras = get_extras(extras or [], self.extras)
for d in reqts:
if 'extra' not in d and 'environment' not in d:
# unconditional
include = True
else:
if 'extra' not in d:
# Not extra-dependent - only environment-dependent
include = True
else:
include = d.get('extra') in extras
if include:
# Not excluded because of extras, check environment
marker = d.get('environment')
if marker:
include = interpret(marker, env)
if include:
result.extend(d['requires'])
for key in ('build', 'dev', 'test'):
e = ':%s:' % key
if e in extras:
extras.remove(e)
# A recursive call, but it should terminate since 'test'
# has been removed from the extras
reqts = self._data.get('%s_requires' % key, [])
result.extend(self.get_requirements(reqts, extras=extras,
env=env))
return result
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
@property
def dependencies(self):
if self._legacy:
raise NotImplementedError
else:
return extract_by_key(self._data, self.DEPENDENCY_KEYS)
@dependencies.setter
def dependencies(self, value):
if self._legacy:
raise NotImplementedError
else:
self._data.update(value)
def _validate_mapping(self, mapping, scheme):
if mapping.get('metadata_version') != self.METADATA_VERSION:
raise MetadataUnrecognizedVersionError()
missing = []
for key, exclusions in self.MANDATORY_KEYS.items():
if key not in mapping:
if scheme not in exclusions:
missing.append(key)
if missing:
msg = 'Missing metadata items: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for k, v in mapping.items():
self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
'license': 'License',
'summary': 'Summary',
'description': 'Description',
'classifiers': 'Classifier',
}
def _to_legacy(self):
def process_entries(entries):
reqts = set()
for e in entries:
extra = e.get('extra')
env = e.get('environment')
rlist = e['requires']
for r in rlist:
if not env and not extra:
reqts.add(r)
else:
marker = ''
if extra:
marker = 'extra == "%s"' % extra
if env:
if marker:
marker = '(%s) and %s' % (env, marker)
else:
marker = env
reqts.add(';'.join((r, marker)))
return reqts
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
for nk, ok in self.LEGACY_MAPPING.items():
if nk in nmd:
result[ok] = nmd[nk]
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
# TODO: other fields such as contacts
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
if [path, fileobj].count(None) != 1:
raise ValueError('Exactly one of path and fileobj is needed')
self.validate()
if legacy:
if self._legacy:
legacy_md = self._legacy
else:
legacy_md = self._to_legacy()
if path:
legacy_md.write(path, skip_unknown=skip_unknown)
else:
legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
else:
if self._legacy:
d = self._from_legacy()
else:
d = self._data
if fileobj:
json.dump(d, fileobj, ensure_ascii=True, indent=2,
sort_keys=True)
else:
with codecs.open(path, 'w', 'utf-8') as f:
json.dump(d, f, ensure_ascii=True, indent=2,
sort_keys=True)
def add_requirements(self, requirements):
if self._legacy:
self._legacy.add_requirements(requirements)
else:
run_requires = self._data.setdefault('run_requires', [])
always = None
for entry in run_requires:
if 'environment' not in entry and 'extra' not in entry:
always = entry
break
if always is None:
always = { 'requires': requirements }
run_requires.insert(0, always)
else:
rset = set(always['requires']) | set(requirements)
always['requires'] = sorted(rset)
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
| apache-2.0 |
henriquefacioli/gd-ae- | gda/admin.py | 1 | 1632 | from django.contrib import admin
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from import_export.admin import ImportExportActionModelAdmin
from gda.models import Questionnaire, Question, Choice, Answer
## Questionnaires
# Class to import and export Questionnaire
class QuestionnaireResource(resources.ModelResource):
class Meta:
model = Questionnaire
class QuestionnaireAdmin(ImportExportModelAdmin):
resource_class = QuestionnaireResource
list_display = ("pk", "name")
search_fields = ('id','name')
ordering = ['id']
admin.site.register(Questionnaire, QuestionnaireAdmin)
## Questions
# Class to import and export Questions
class QuestionResource(resources.ModelResource):
class Meta:
model = Question
class QuestionAdmin(ImportExportModelAdmin):
resource_class = QuestionResource
search_fields = ('text','type')
ordering = ['id']
admin.site.register(Question, QuestionAdmin)
## Choices
class ChoiceResource(resources.ModelResource):
class Meta:
model = Choice
class ChoiceAdmin(ImportExportModelAdmin):
resource_class = ChoiceResource
search_fields = ('id','text')
ordering = ['id']
admin.site.register(Choice, ChoiceAdmin)
## Answers
class AnswerResource(resources.ModelResource):
class Meta:
model = Answer
class AnswerAdmin(ImportExportModelAdmin):
resource_class = AnswerResource
list_display = ("question",)
search_fields = ('offering__subject__code',
'question__id')
ordering = ['question']
admin.site.register(Answer, AnswerAdmin)
| gpl-3.0 |
hutchison/bp_mgmt | bp_cupid/tests/test_login.py | 1 | 1795 | from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from django.contrib.auth.models import User
class TestLogin(StaticLiveServerTestCase):
def setUp(self):
self.username = 'alice'
self.email = '[email protected]'
self.password = 'test'
User.objects.create_user(self.username, self.email, self.password)
self.browser = webdriver.Firefox()
#self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_successful_login(self):
self.browser.get(self.live_server_url)
self.browser.find_element_by_link_text('Login').click()
input_username = self.browser.find_element_by_id('id_username')
input_username.send_keys(self.username)
input_password = self.browser.find_element_by_id('id_password')
input_password.send_keys(self.password)
self.browser.find_element_by_css_selector('[type=submit]').click()
self.assertIsNotNone(self.browser.find_element_by_id('logout'))
def test_failing_login(self):
self.browser.get(self.live_server_url)
self.browser.find_element_by_link_text('Login').click()
input_username = self.browser.find_element_by_id('id_username')
input_username.send_keys(self.username)
input_password = self.browser.find_element_by_id('id_password')
input_password.send_keys('foobar')
self.browser.find_element_by_css_selector('[type=submit]').click()
alert = self.browser.find_element_by_class_name('alert-danger')
self.assertEqual(
alert.text,
'Bitte einen gültigen Benutzername und ein Passwort eingeben. Beide Felder berücksichtigen die Groß-/Kleinschreibung.'
)
| agpl-3.0 |
bbannier/ROOT | interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/util.py | 12 | 2609 | # This file provides common utility functions for the test suite.
from clang.cindex import Cursor
from clang.cindex import TranslationUnit
def get_tu(source, lang='c', all_warnings=False, flags=[]):
"""Obtain a translation unit from source and language.
    By default, the translation unit is created from source file "t.<ext>"
    where <ext> is the default file extension for the specified language.
    The default language is C, so "t.c" is the default file name.
Supported languages are {c, cpp, objc}.
all_warnings is a convenience argument to enable all compiler warnings.
"""
args = list(flags)
name = 't.c'
if lang == 'cpp':
name = 't.cpp'
args.append('-std=c++11')
elif lang == 'objc':
name = 't.m'
elif lang != 'c':
raise Exception('Unknown language: %s' % lang)
if all_warnings:
args += ['-Wall', '-Wextra']
return TranslationUnit.from_source(name, args, unsaved_files=[(name,
source)])
def get_cursor(source, spelling):
"""Obtain a cursor from a source object.
This provides a convenient search mechanism to find a cursor with specific
spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If the cursor is not found, None is returned.
"""
children = []
if isinstance(source, Cursor):
children = source.get_children()
else:
# Assume TU
children = source.cursor.get_children()
for cursor in children:
if cursor.spelling == spelling:
return cursor
# Recurse into children.
result = get_cursor(cursor, spelling)
if result is not None:
return result
return None
def get_cursors(source, spelling):
"""Obtain all cursors from a source object with a specific spelling.
This provides a convenient search mechanism to find all cursors with specific
spelling within a source. The first argument can be either a
TranslationUnit or Cursor instance.
If no cursors are found, an empty list is returned.
"""
cursors = []
children = []
if isinstance(source, Cursor):
children = source.get_children()
else:
# Assume TU
children = source.cursor.get_children()
for cursor in children:
if cursor.spelling == spelling:
cursors.append(cursor)
# Recurse into children.
cursors.extend(get_cursors(cursor, spelling))
return cursors
__all__ = [
'get_cursor',
'get_cursors',
'get_tu',
]
| lgpl-2.1 |
StephaneP/volatility | volatility/conf.py | 57 | 15263 | ## This file was taken from PyFlag http://www.pyflag.net/
# Michael Cohen <[email protected]>
# David Collett <[email protected]>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ******************************************************
#pylint: disable-msg=C0111
""" Configuration modules for pyflag.
PyFlag is a complex package and requires a flexible configuration
system. The following are the requirements of the configuration
system:
1) Configuration must be available from a number of sources:
- Autoconf must be able to set things like the python path (in case
pyflag is installed to a different prefix)
- Users must be able to configure the installed system for their
specific requirements.
- Unconfigured parameters must be resolved at run time through the
GUI and saved.
2) Configuration must be able to apply to cases specifically.
3) Because pyflag is modular, configuration variables might be required
for each module. This means that definitions and declarations of
configuration variables must be distributed in each plugin.
These goals are achieved by the use of multiple sources of
configuration information:
- The system wide configuration file is this file: conf.py. It is
generated from the build system from conf.py.in by substituting
autoconfigured variables into it. It contains the most basic
settings related to the installation, e.g. which python interpreted
is used, where the python modules are installed etc. In particular
it refers to the location of the system configuration file (usually
found in /usr/local/etc/pyflagrc, or in /etc/pyflagrc).
- The sysconfig file contains things like where the upload
directory is, where to store temporary files etc. These are mainly
installation wide settings which are expected to be modified by the
administrator. Note that if you want the GUI to manipulate this
file it needs to be writable by the user running the GUI.
- Finally a conf table in each case is used to provide a per case
configuration
"""
import ConfigParser
import optparse
import os
import sys
default_config = "/etc/volatilityrc"
class PyFlagOptionParser(optparse.OptionParser):
final = False
help_hooks = []
def _process_args(self, largs, rargs, values):
try:
return optparse.OptionParser._process_args(self, largs, rargs, values)
except (optparse.BadOptionError, optparse.OptionValueError), err:
if self.final:
raise err
def error(self, msg):
        ## We can't emit errors about missing parameters until we are
## sure that all modules have registered all their parameters
if self.final:
return optparse.OptionParser.error(self, msg)
else:
raise RuntimeError(msg)
def print_help(self, file = sys.stdout):
optparse.OptionParser.print_help(self, file)
for cb in self.help_hooks:
file.write(cb())
class ConfObject(object):
""" This is a singleton class to manage the configuration.
This means it can be instantiated many times, but each instance
refers to the global configuration (which is set in class
variables).
NOTE: The class attributes have static dicts assigned to
facilitate singleton behaviour. This means all future instances
will have the same dicts.
"""
optparser = PyFlagOptionParser(add_help_option = False,
version = False,
)
initialised = False
## This is the globals dictionary which will be used for
## evaluating the configuration directives.
g_dict = dict(__builtins__ = None)
## These are the options derived by reading any config files
cnf_opts = {}
## Command line opts
opts = {}
args = None
default_opts = {}
docstrings = {}
## These are the actual options returned by the optparser:
optparse_opts = None
## Filename where the configuration file is:
_filename = None
_filenames = []
## These parameters can not be updated by the GUI (but will be
## propagated into new configuration files)
readonly = {}
    ## Absolute parameters can only be set by the code or command
    ## line; they can not be overridden in the configuration
    ## file. This ensures that configuration files don't mask new
    ## options (e.g. schema version)
_absolute = {}
## A list of option names:
options = []
## Cache variants: There are configuration options which
## encapsulate the state of the running program. If any of these
## change all caches will be invalidated.
cache_invalidators = {}
def __init__(self):
""" This is a singleton object kept in the class """
if not ConfObject.initialised:
self.optparser.add_option("-h", "--help", action = "store_true", default = False,
help = "list all available options and their default values. Default values may be set in the configuration file (" + default_config + ")")
ConfObject.initialised = True
def set_usage(self, usage = None, version = None):
if usage:
self.optparser.set_usage(usage)
if version:
self.optparser.version = version
def add_file(self, filename, _type = 'init'):
""" Adds a new file to parse """
self._filenames.append(filename)
self.cnf_opts.clear()
for f in self._filenames:
try:
conf_parser = ConfigParser.ConfigParser()
conf_parser.read(f)
for k, v in conf_parser.items('DEFAULT'):
## Absolute parameters are protected from
## configuration files:
if k in self._absolute.keys():
continue
try:
v = eval(v, self.g_dict)
except Exception, _e:
pass
## update the configured options
self.cnf_opts[k] = v
except IOError:
print "Unable to open {0}".format(f)
ConfObject._filename = filename
def print_help(self):
return self.optparser.print_help()
def add_help_hook(self, cb):
""" Adds an epilog to the help message """
self.optparser.help_hooks.append(cb)
def set_help_hook(self, cb):
self.optparser.help_hooks = [cb]
def parse_options(self, final = True):
""" Parses the options from command line and any conf files
currently added.
        The final parameter should only be set to True by main programs
        at the point where they are prepared for us to call exit if
        required (for example when we detect the -h parameter).
"""
self.optparser.final = final
## Parse the command line options:
try:
(opts, args) = self.optparser.parse_args()
self.opts.clear()
## Update our cmdline dict:
for k in dir(opts):
v = getattr(opts, k)
if k in self.options and not v == None:
self.opts[k] = v
except UnboundLocalError:
raise RuntimeError("Unknown option - use -h to see help")
## If error() was called we catch it here
except RuntimeError:
opts = {}
## This gives us as much as was parsed so far
args = self.optparser.largs
self.optparse_opts = opts
self.args = args
if final:
## Reparse the config file again:
self.add_file(self._filename)
try:
## Help can only be set on the command line
if getattr(self.optparse_opts, "help"):
## Populate the metavars with the default values:
for opt in self.optparser.option_list:
try:
opt.metavar = "{0}".format((getattr(self, opt.dest) or
opt.dest.upper()))
except Exception, _e:
pass
self.optparser.print_help()
sys.exit(0)
except AttributeError:
pass
## Set the cache invalidators on the cache now:
import volatility.cache as cache
for k, v in self.cache_invalidators.items():
cache.CACHE.invalidate_on(k, v)
def remove_option(self, option):
""" Removes options both from the config file parser and the
command line parser
        This should only be used on options *before* they have been read,
otherwise things could get very confusing.
"""
option = option.lower()
if option in self.cache_invalidators:
del self.cache_invalidators[option]
normalized_option = option.replace("-", "_")
if normalized_option not in self.options:
return
self.options.remove(normalized_option)
if normalized_option in self.readonly:
del self.readonly[normalized_option]
if normalized_option in self.default_opts:
del self.default_opts[normalized_option]
if normalized_option in self._absolute:
del self._absolute[normalized_option]
del self.docstrings[normalized_option]
self.optparser.remove_option("--{0}".format(option))
try:
self.parse_options(False)
except AttributeError:
pass
def add_option(self, option, short_option = None,
cache_invalidator = True,
**args):
""" Adds options both to the config file parser and the
command line parser.
Args:
option: The long option name.
short_option: An optional short option.
cache_invalidator: If set, when this option
changes all caches are invalidated.
"""
option = option.lower()
if cache_invalidator:
self.cache_invalidators[option] = lambda : self.get_value(option)
normalized_option = option.replace("-", "_")
if normalized_option in self.options:
return
self.options.append(normalized_option)
## If this is read only we store it in a special dict
try:
if args['readonly']:
self.readonly[normalized_option] = args['default']
del args['readonly']
except KeyError:
pass
## If there is a default specified, we update our defaults dict:
try:
default = args['default']
try:
default = eval(default, self.g_dict)
except:
pass
self.default_opts[normalized_option] = default
del args['default']
except KeyError:
pass
try:
self._absolute[normalized_option] = args['absolute']
del args['absolute']
except KeyError:
pass
self.docstrings[normalized_option] = args.get('help', None)
if short_option:
self.optparser.add_option("-{0}".format(short_option), "--{0}".format(option), **args)
else:
self.optparser.add_option("--{0}".format(option), **args)
## update the command line parser
## We have to do the try-catch for python 2.4 support of short
## arguments. It can be removed when python 2.5 is a requirement
try:
self.parse_options(False)
except AttributeError:
pass
def update(self, key, value):
""" This can be used by scripts to force a value of an option """
self.readonly[key.lower()] = value
def get_value(self, key):
return getattr(self, key.replace("-", "_"))
def __getattr__(self, attr):
## If someone is looking for a configuration parameter but
## we have not parsed anything yet - do so now.
if self.opts == None:
self.parse_options(False)
## Maybe its a class method?
try:
return super(ConfObject, self).__getattribute__(attr)
except AttributeError:
pass
        ## Is it a read-only parameter (i.e. can not be overridden by
## the config file)
try:
return self.readonly[attr.lower()]
except KeyError:
pass
## Try to find the attribute in the command line options:
try:
return self.opts[attr.lower()]
except KeyError:
pass
## Has it already been parsed?
try:
tmp = getattr(self.optparser.values, attr.lower())
if tmp:
return tmp
except AttributeError:
pass
## Was it given in the environment?
try:
return os.environ["VOLATILITY_" + attr.upper()]
except KeyError:
pass
## No - try the configuration file:
try:
return self.cnf_opts[attr.lower()]
except KeyError:
pass
## No - is there a default for it?
try:
return self.default_opts[attr.lower()]
except KeyError:
pass
## Maybe its just a command line option:
try:
if not attr.startswith("_") and self.optparse_opts:
return getattr(self.optparse_opts, attr.lower())
except AttributeError:
pass
raise AttributeError("Parameter {0} is not configured - try setting it on the command line (-h for help)".format(attr))
class DummyConfig(ConfObject):
pass
config = ConfObject()
if os.access(default_config, os.R_OK):
config.add_file(default_config)
else:
config.add_file("volatilityrc")
default_conf_path = ".volatilityrc"
try:
default_conf_path = os.environ['HOME'] + '/.volatilityrc'
except KeyError:
pass
config.add_option("CONF-FILE", default = default_conf_path,
cache_invalidator = False,
help = "User based configuration file")
config.add_file(config.CONF_FILE)
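# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module).  Because ConfObject keeps its state in class attributes, every
# instance shares the same options, so a plugin may register a parameter in
# one place and any other module can read it back via attribute access.  The
# option name below is hypothetical.
if __name__ == '__main__':
    config.add_option("EXAMPLE-TIMEOUT", default = 30, action = "store",
                      type = "int",
                      help = "Hypothetical timeout value in seconds")
    # Resolution order for config.EXAMPLE_TIMEOUT is: readonly values,
    # command line, environment (VOLATILITY_EXAMPLE_TIMEOUT), config file,
    # then the registered default (see ConfObject.__getattr__ above).
    print "Example timeout:", config.EXAMPLE_TIMEOUT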
| gpl-2.0 |
lewislone/kernel_reading | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
pincopallino93/rdfendpoints | lib/rdflib/plugins/parsers/pyRdfa/rdfs/__init__.py | 25 | 1729 | # -*- coding: utf-8 -*-
"""
Separate module to handle vocabulary expansions. The L{cache} module takes care of caching vocabulary graphs; the L{process}
module takes care of the expansion itself.
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: __init__.py,v 1.4 2012/08/20 13:15:28 ivan Exp $ $Date: 2012/08/20 13:15:28 $
"""
import sys
import os
import rdflib
from rdflib import URIRef
from rdflib import Literal
from rdflib import BNode
from rdflib import Namespace
if rdflib.__version__ >= "3.0.0" :
from rdflib import RDF as ns_rdf
from rdflib import RDFS as ns_rdfs
from rdflib import Graph
else :
from rdflib.RDFS import RDFSNS as ns_rdfs
from rdflib.RDF import RDFNS as ns_rdf
from rdflib.Graph import Graph
from .. import RDFaError, pyRdfaError
from .. import ns_rdfa, ns_xsd, ns_distill
VocabCachingInfo = ns_distill["VocabCachingInfo"]
# Error message texts
err_outdated_cache = "Vocab document <%s> could not be dereferenced; using possibly outdated cache"
err_unreachable_vocab = "Vocab document <%s> could not be dereferenced"
err_unparsable_Turtle_vocab = "Could not parse vocab in Turtle at <%s> (%s)"
err_unparsable_xml_vocab = "Could not parse vocab in RDF/XML at <%s> (%s)"
err_unparsable_ntriples_vocab = "Could not parse vocab in N-Triple at <%s> (%s)"
err_unparsable_rdfa_vocab = "Could not parse vocab in RDFa at <%s> (%s)"
err_unrecognised_vocab_type = "Unrecognized media type for the vocab file <%s>: '%s'"
| apache-2.0 |
chispita/epiwork | apps/survey/migrations/0005_verify_single_user_assumption.py | 4 | 12061 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for survey_user in orm['survey.SurveyUser'].objects.all():
assert survey_user.user.count() <= 1, survey_user.global_id
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.extraresponse': {
'Meta': {'object_name': 'ExtraResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.lastresponse': {
'Meta': {'object_name': 'LastResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'})
},
'survey.localflusurvey': {
'Meta': {'object_name': 'LocalFluSurvey'},
'age_user': ('django.db.models.fields.SmallIntegerField', [], {}),
'data': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.localprofile': {
'Meta': {'object_name': 'LocalProfile'},
'a_family': ('django.db.models.fields.SmallIntegerField', [], {}),
'a_smoker': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_current': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_seasonal': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_swine': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'sq_date_first': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_date_last': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_num_season': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'sq_num_total': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'survey.localresponse': {
'Meta': {'object_name': 'LocalResponse'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.participation': {
'Meta': {'object_name': 'Participation'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'epidb_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'previous_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'previous_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Survey']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Survey']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.profilesendqueue': {
'Meta': {'object_name': 'ProfileSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.responsesendqueue': {
'Meta': {'object_name': 'ResponseSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specification': ('django.db.models.fields.TextField', [], {}),
'survey_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'survey.surveyuser': {
'Meta': {'object_name': 'SurveyUser'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'global_id': ('django.db.models.fields.CharField', [], {'default': "'ccb466d8-5d2d-488f-b539-5d077b609db7'", 'unique': 'True', 'max_length': '36'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'last_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
}
}
complete_apps = ['survey']
| agpl-3.0 |
harmy/kbengine | kbe/src/lib/python/Lib/test/test_plistlib.py | 55 | 7809 | # Copyright (C) 2003 Python Software Foundation
import unittest
import plistlib
import os
import datetime
from test import support
# This test data was generated through Cocoa's NSDictionary class
TESTDATA = b"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" \
"http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>aDate</key>
<date>2004-10-26T10:33:33Z</date>
<key>aDict</key>
<dict>
<key>aFalseValue</key>
<false/>
<key>aTrueValue</key>
<true/>
<key>aUnicodeValue</key>
<string>M\xc3\xa4ssig, Ma\xc3\x9f</string>
<key>anotherString</key>
<string><hello & 'hi' there!></string>
<key>deeperDict</key>
<dict>
<key>a</key>
<integer>17</integer>
<key>b</key>
<real>32.5</real>
<key>c</key>
<array>
<integer>1</integer>
<integer>2</integer>
<string>text</string>
</array>
</dict>
</dict>
<key>aFloat</key>
<real>0.5</real>
<key>aList</key>
<array>
<string>A</string>
<string>B</string>
<integer>12</integer>
<real>32.5</real>
<array>
<integer>1</integer>
<integer>2</integer>
<integer>3</integer>
</array>
</array>
<key>aString</key>
<string>Doodah</string>
<key>anInt</key>
<integer>728</integer>
<key>nestedData</key>
<array>
<data>
PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5r
PgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5
IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBi
aW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3Rz
IG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQID
PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAw==
</data>
</array>
<key>someData</key>
<data>
PGJpbmFyeSBndW5rPg==
</data>
<key>someMoreData</key>
<data>
PGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8
bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAzxs
b3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxv
dHMgb2YgYmluYXJ5IGd1bms+AAECAzxsb3RzIG9mIGJpbmFyeSBndW5rPgABAgM8bG90
cyBvZiBiaW5hcnkgZ3Vuaz4AAQIDPGxvdHMgb2YgYmluYXJ5IGd1bms+AAECAw==
</data>
<key>\xc3\x85benraa</key>
<string>That was a unicode key.</string>
</dict>
</plist>
""".replace(b" " * 8, b"\t") # Apple as well as plistlib.py output hard tabs
class TestPlistlib(unittest.TestCase):
def tearDown(self):
try:
os.unlink(support.TESTFN)
except:
pass
def _create(self):
pl = dict(
aString="Doodah",
aList=["A", "B", 12, 32.5, [1, 2, 3]],
aFloat = 0.5,
anInt = 728,
aDict=dict(
anotherString="<hello & 'hi' there!>",
aUnicodeValue='M\xe4ssig, Ma\xdf',
aTrueValue=True,
aFalseValue=False,
deeperDict=dict(a=17, b=32.5, c=[1, 2, "text"]),
),
someData = plistlib.Data(b"<binary gunk>"),
someMoreData = plistlib.Data(b"<lots of binary gunk>\0\1\2\3" * 10),
nestedData = [plistlib.Data(b"<lots of binary gunk>\0\1\2\3" * 10)],
aDate = datetime.datetime(2004, 10, 26, 10, 33, 33),
)
pl['\xc5benraa'] = "That was a unicode key."
return pl
def test_create(self):
pl = self._create()
self.assertEqual(pl["aString"], "Doodah")
self.assertEqual(pl["aDict"]["aFalseValue"], False)
def test_io(self):
pl = self._create()
plistlib.writePlist(pl, support.TESTFN)
pl2 = plistlib.readPlist(support.TESTFN)
self.assertEqual(dict(pl), dict(pl2))
def test_bytes(self):
pl = self._create()
data = plistlib.writePlistToBytes(pl)
pl2 = plistlib.readPlistFromBytes(data)
self.assertEqual(dict(pl), dict(pl2))
data2 = plistlib.writePlistToBytes(pl2)
self.assertEqual(data, data2)
def test_appleformatting(self):
pl = plistlib.readPlistFromBytes(TESTDATA)
data = plistlib.writePlistToBytes(pl)
self.assertEqual(data, TESTDATA,
"generated data was not identical to Apple's output")
def test_appleformattingfromliteral(self):
pl = self._create()
pl2 = plistlib.readPlistFromBytes(TESTDATA)
self.assertEqual(dict(pl), dict(pl2),
"generated data was not identical to Apple's output")
def test_bytesio(self):
from io import BytesIO
b = BytesIO()
pl = self._create()
plistlib.writePlist(pl, b)
pl2 = plistlib.readPlist(BytesIO(b.getvalue()))
self.assertEqual(dict(pl), dict(pl2))
def test_controlcharacters(self):
for i in range(128):
c = chr(i)
testString = "string containing %s" % c
if i >= 32 or c in "\r\n\t":
# \r, \n and \t are the only legal control chars in XML
plistlib.writePlistToBytes(testString)
else:
self.assertRaises(ValueError,
plistlib.writePlistToBytes,
testString)
def test_nondictroot(self):
test1 = "abc"
test2 = [1, 2, 3, "abc"]
result1 = plistlib.readPlistFromBytes(plistlib.writePlistToBytes(test1))
result2 = plistlib.readPlistFromBytes(plistlib.writePlistToBytes(test2))
self.assertEqual(test1, result1)
self.assertEqual(test2, result2)
def test_invalidarray(self):
for i in ["<key>key inside an array</key>",
"<key>key inside an array2</key><real>3</real>",
"<true/><key>key inside an array3</key>"]:
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
("<plist><array>%s</array></plist>"%i).encode())
def test_invaliddict(self):
for i in ["<key><true/>k</key><string>compound key</string>",
"<key>single key</key>",
"<string>missing key</string>",
"<key>k1</key><string>v1</string><real>5.3</real>"
"<key>k1</key><key>k2</key><string>double key</string>"]:
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
("<plist><dict>%s</dict></plist>"%i).encode())
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
("<plist><array><dict>%s</dict></array></plist>"%i).encode())
def test_invalidinteger(self):
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
b"<plist><integer>not integer</integer></plist>")
def test_invalidreal(self):
self.assertRaises(ValueError, plistlib.readPlistFromBytes,
b"<plist><integer>not real</integer></plist>")
def test_main():
support.run_unittest(TestPlistlib)
if __name__ == '__main__':
test_main()
| lgpl-3.0 |
srm912/servo | tests/wpt/harness/wptrunner/metadata.py | 78 | 12836 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import shutil
import sys
import tempfile
import types
import uuid
from collections import defaultdict
from mozlog import reader
from mozlog import structuredlog
import expected
import manifestupdate
import testloader
import wptmanifest
import wpttest
from vcs import git
manifest = None # Module that will be imported relative to test_root
logger = structuredlog.StructuredLogger("web-platform-tests")
def load_test_manifests(serve_root, test_paths):
do_delayed_imports(serve_root)
manifest_loader = testloader.ManifestLoader(test_paths, False)
return manifest_loader.load()
def update_expected(test_paths, serve_root, log_file_names,
rev_old=None, rev_new="HEAD", ignore_existing=False,
sync_root=None, property_order=None, boolean_properties=None):
"""Update the metadata files for web-platform-tests based on
the results obtained in a previous run"""
manifests = load_test_manifests(serve_root, test_paths)
change_data = {}
if sync_root is not None:
if rev_old is not None:
rev_old = git("rev-parse", rev_old, repo=sync_root).strip()
rev_new = git("rev-parse", rev_new, repo=sync_root).strip()
if rev_old is not None:
change_data = load_change_data(rev_old, rev_new, repo=sync_root)
expected_map_by_manifest = update_from_logs(manifests,
*log_file_names,
ignore_existing=ignore_existing,
property_order=property_order,
boolean_properties=boolean_properties)
for test_manifest, expected_map in expected_map_by_manifest.iteritems():
url_base = manifests[test_manifest]["url_base"]
metadata_path = test_paths[url_base]["metadata_path"]
write_changes(metadata_path, expected_map)
results_changed = [item.test_path for item in expected_map.itervalues() if item.modified]
return unexpected_changes(manifests, change_data, results_changed)
def do_delayed_imports(serve_root):
global manifest
from manifest import manifest
def files_in_repo(repo_root):
return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
def rev_range(rev_old, rev_new, symmetric=False):
joiner = ".." if not symmetric else "..."
return "".join([rev_old, joiner, rev_new])
def paths_changed(rev_old, rev_new, repo):
data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
for line in data.split("\n") if line.strip()]
output = set(lines)
return output
def load_change_data(rev_old, rev_new, repo):
changes = paths_changed(rev_old, rev_new, repo)
rv = {}
status_keys = {"M": "modified",
"A": "new",
"D": "deleted"}
# TODO: deal with renames
for item in changes:
rv[item[1]] = status_keys[item[0]]
return rv
def unexpected_changes(manifests, change_data, files_changed):
files_changed = set(files_changed)
root_manifest = None
for manifest, paths in manifests.iteritems():
if paths["url_base"] == "/":
root_manifest = manifest
break
else:
return []
rv = []
    # change_data values were normalized to "modified"/"new"/"deleted" above,
    # so compare against the normalized form rather than the raw "M" status.
    return [fn for fn, tests in root_manifest if fn in files_changed and change_data.get(fn) != "modified"]
# For each testrun
# Load all files and scan for the suite_start entry
# Build a hash of filename: properties
# For each different set of properties, gather all chunks
# For each chunk in the set of chunks, go through all tests
# for each test, make a map of {conditionals: [(platform, new_value)]}
# Repeat for each platform
# For each test in the list of tests:
# for each conditional:
# If all the new values match (or there aren't any) retain that conditional
# If any new values mismatch mark the test as needing human attention
# Check if all the RHS values are the same; if so collapse the conditionals
def update_from_logs(manifests, *log_filenames, **kwargs):
ignore_existing = kwargs.get("ignore_existing", False)
property_order = kwargs.get("property_order")
boolean_properties = kwargs.get("boolean_properties")
expected_map = {}
id_test_map = {}
for test_manifest, paths in manifests.iteritems():
expected_map_manifest, id_path_map_manifest = create_test_tree(
paths["metadata_path"],
test_manifest,
property_order=property_order,
boolean_properties=boolean_properties)
expected_map[test_manifest] = expected_map_manifest
id_test_map.update(id_path_map_manifest)
updater = ExpectedUpdater(manifests, expected_map, id_test_map,
ignore_existing=ignore_existing)
for log_filename in log_filenames:
with open(log_filename) as f:
updater.update_from_log(f)
for manifest_expected in expected_map.itervalues():
for tree in manifest_expected.itervalues():
for test in tree.iterchildren():
for subtest in test.iterchildren():
subtest.coalesce_expected()
test.coalesce_expected()
return expected_map
def directory_manifests(metadata_path):
rv = []
for dirpath, dirname, filenames in os.walk(metadata_path):
if "__dir__.ini" in filenames:
rel_path = os.path.relpath(dirpath, metadata_path)
rv.append(os.path.join(rel_path, "__dir__.ini"))
return rv
def write_changes(metadata_path, expected_map):
# First write the new manifest files to a temporary directory
temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
write_new_expected(temp_path, expected_map)
# Keep all __dir__.ini files (these are not in expected_map because they
# aren't associated with a specific test)
keep_files = directory_manifests(metadata_path)
# Copy all files in the root to the temporary location since
# these cannot be ini files
keep_files.extend(item for item in os.listdir(metadata_path) if
not os.path.isdir(os.path.join(metadata_path, item)))
for item in keep_files:
dest_dir = os.path.dirname(os.path.join(temp_path, item))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.join(metadata_path, item),
os.path.join(temp_path, item))
# Then move the old manifest files to a new location
temp_path_2 = metadata_path + str(uuid.uuid4())
os.rename(metadata_path, temp_path_2)
# Move the new files to the destination location and remove the old files
os.rename(temp_path, metadata_path)
shutil.rmtree(temp_path_2)
def write_new_expected(metadata_path, expected_map):
# Serialize the data back to a file
for tree in expected_map.itervalues():
if not tree.is_empty:
manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
assert manifest_str != ""
path = expected.expected_path(metadata_path, tree.test_path)
dir = os.path.split(path)[0]
if not os.path.exists(dir):
os.makedirs(dir)
with open(path, "w") as f:
f.write(manifest_str)
class ExpectedUpdater(object):
def __init__(self, test_manifests, expected_tree, id_path_map, ignore_existing=False):
self.test_manifests = test_manifests
self.expected_tree = expected_tree
self.id_path_map = id_path_map
self.ignore_existing = ignore_existing
self.run_info = None
self.action_map = {"suite_start": self.suite_start,
"test_start": self.test_start,
"test_status": self.test_status,
"test_end": self.test_end}
self.tests_visited = {}
self.test_cache = {}
def update_from_log(self, log_file):
self.run_info = None
log_reader = reader.read(log_file)
reader.each_log(log_reader, self.action_map)
def suite_start(self, data):
self.run_info = data["run_info"]
def test_id(self, id):
if type(id) in types.StringTypes:
return id
else:
return tuple(id)
def test_start(self, data):
test_id = self.test_id(data["test"])
try:
test_manifest, test = self.id_path_map[test_id]
expected_node = self.expected_tree[test_manifest][test].get_test(test_id)
except KeyError:
print "Test not found %s, skipping" % test_id
return
self.test_cache[test_id] = expected_node
if test_id not in self.tests_visited:
if self.ignore_existing:
expected_node.clear_expected()
self.tests_visited[test_id] = set()
def test_status(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
subtest = test.get_subtest(data["subtest"])
self.tests_visited[test.id].add(data["subtest"])
result = test_cls.subtest_result_cls(
data["subtest"],
data["status"],
data.get("message"))
subtest.set_result(self.run_info, result)
def test_end(self, data):
test_id = self.test_id(data["test"])
test = self.test_cache.get(test_id)
if test is None:
return
test_cls = wpttest.manifest_test_cls[test.test_type]
if data["status"] == "SKIP":
return
result = test_cls.result_cls(
data["status"],
data.get("message"))
test.set_result(self.run_info, result)
del self.test_cache[test_id]
def create_test_tree(metadata_path, test_manifest, property_order=None,
boolean_properties=None):
expected_map = {}
id_test_map = {}
exclude_types = frozenset(["stub", "helper", "manual"])
include_types = set(manifest.item_types) - exclude_types
for test_path, tests in test_manifest.itertypes(*include_types):
expected_data = load_expected(test_manifest, metadata_path, test_path, tests,
property_order=property_order,
boolean_properties=boolean_properties)
if expected_data is None:
expected_data = create_expected(test_manifest,
test_path,
tests,
property_order=property_order,
boolean_properties=boolean_properties)
for test in tests:
id_test_map[test.id] = (test_manifest, test)
expected_map[test] = expected_data
return expected_map, id_test_map
def create_expected(test_manifest, test_path, tests, property_order=None,
boolean_properties=None):
expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base,
property_order=property_order,
boolean_properties=boolean_properties)
for test in tests:
expected.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected
def load_expected(test_manifest, metadata_path, test_path, tests, property_order=None,
boolean_properties=None):
expected_manifest = manifestupdate.get_manifest(metadata_path,
test_path,
test_manifest.url_base,
property_order=property_order,
boolean_properties=boolean_properties)
if expected_manifest is None:
return
tests_by_id = {item.id: item for item in tests}
# Remove expected data for tests that no longer exist
for test in expected_manifest.iterchildren():
if not test.id in tests_by_id:
test.remove()
# Add tests that don't have expected data
for test in tests:
if not expected_manifest.has_test(test.id):
expected_manifest.append(manifestupdate.TestNode.create(test.item_type, test.id))
return expected_manifest
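# ---------------------------------------------------------------------------
# Illustrative call (added for clarity; not part of the original module).
# The url_base key, directory names and log file name below are hypothetical;
# the real test_paths structure comes from the wptrunner configuration.
#
# changed = update_expected(
#     {"/": {"tests_path": "tests", "metadata_path": "meta"}},
#     serve_root="tests",
#     log_file_names=["wptrunner_raw.log"],
#     ignore_existing=False)
# # `changed` lists the test files whose results changed unexpectedly.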
| mpl-2.0 |
therewillbecode/ichnaea | ichnaea/data/monitor.py | 1 | 3667 | from collections import defaultdict
from datetime import timedelta
from sqlalchemy import func
from sqlalchemy.orm import load_only
from ichnaea.models import (
ApiKey,
OCIDCell,
)
from ichnaea import util
class ApiKeyLimits(object):
def __init__(self, task, session):
self.task = task
self.session = session
self.redis_client = task.redis_client
self.stats_client = task.stats_client
def __call__(self):
today = util.utcnow().strftime('%Y%m%d')
keys = self.redis_client.keys('apilimit:*:' + today)
if keys:
values = self.redis_client.mget(keys)
keys = [k.decode('utf-8').split(':')[1] for k in keys]
else:
values = []
names = {}
if keys:
query = (self.session.query(ApiKey)
.filter(ApiKey.valid_key.in_(keys))
.options(load_only('shortname')))
for api_key in query.all():
names[api_key.valid_key] = api_key.name
result = {}
for k, v in zip(keys, values):
name = names.get(k, k)
value = int(v)
result[name] = value
self.stats_client.gauge(
'api.limit', value, tags=['key:' + name])
return result
class ApiUsers(object):
def __init__(self, task):
self.task = task
self.redis_client = task.redis_client
self.stats_client = task.stats_client
def __call__(self):
days = {}
today = util.utcnow().date()
for i in range(0, 7):
day = today - timedelta(days=i)
days[i] = day.strftime('%Y-%m-%d')
metrics = defaultdict(list)
result = {}
for key in self.redis_client.scan_iter(
match='apiuser:*', count=100):
_, api_type, api_name, day = key.decode('ascii').split(':')
if day not in days.values():
# delete older entries
self.redis_client.delete(key)
continue
if day == days[0]:
metrics[(api_type, api_name, '1d')].append(key)
metrics[(api_type, api_name, '7d')].append(key)
for parts, keys in metrics.items():
api_type, api_name, interval = parts
value = self.redis_client.pfcount(*keys)
self.stats_client.gauge(
'%s.user' % api_type, value,
tags=['key:%s' % api_name, 'interval:%s' % interval])
result['%s:%s:%s' % parts] = value
return result
class OcidImport(object):
def __init__(self, task, session):
self.task = task
self.session = session
self.stats_client = task.stats_client
def __call__(self):
result = -1
now = util.utcnow()
query = self.session.query(func.max(OCIDCell.created))
max_created = query.first()[0]
if max_created:
# diff between now and the value, in milliseconds
diff = now - max_created
result = (diff.days * 86400 + diff.seconds) * 1000
self.stats_client.gauge('table', result, tags=['table:ocid_cell_age'])
return result
class QueueSize(object):
def __init__(self, task):
self.task = task
self.redis_client = task.redis_client
self.stats_client = task.stats_client
def __call__(self):
result = {}
for name in self.task.app.all_queues:
result[name] = value = self.redis_client.llen(name)
self.stats_client.gauge('queue', value, tags=['queue:' + name])
return result
| apache-2.0 |
audunv/andp | python/andp/view/web/widgets.py | 1 | 10766 | # -*- coding: utf-8; -*-
# Copyright (C) 2009 Østfold University College
#
# This file is part of ANDP.
#
# ANDP is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
"""
This module contains generic widgets for use on web pages.
A widget typically represents an input, and includes functionality for
parsing and validating data.
"""
import time, os, datetime
from mod_python import apache
import andp.view.web
class Widget(object):
"""
Abstract base class for all widgets.
"""
def __init__(self, parent, name, value = None):
self.parent = parent
self.name = name
self.value = value
def ParseInput(self, form):
"""
Parses form data, and returns tuple (status, data, message)
status: True if input data were valid, False otherwise
data: Parsed input data
message: Optional error message to be displayed to end user.
Empty string if everything's OK
"""
return (True, None, "")
class TimeWidget(Widget):
"""
A widget that lets user select a time.
"""
def __init__(self, parent, name, value = None):
super(self.__class__, self).__init__(parent, name, value)
if not self.value:
self.value = time.localtime()[3:5]
def GetHTML(self, form = None, ampm = False):
if form:
defHour = int(form.get(self.name + "_hour", ""))
defMin = int(form.get(self.name + "_min", ""))
try:
defAMPM = form[self.name + "_ampm"].value
except KeyError:
defAMPM = None
else:
if ampm:
if self.value[0] < 12:
defHour, defMin = self.value
defAMPM = "am"
else:
defHour = self.value[0] - 12
defMin = self.value[1]
defAMPM = "pm"
else:
defHour, defMin = self.value
defAMPM = False
html = '<select name="%s_hour" id="%s_hour">' % (self.name, self.name)
if ampm:
upperHour = 12
else:
upperHour = 24
for hour in xrange(0, upperHour):
if ampm and hour == 0:
showHour = 12
else:
showHour = hour
if hour == defHour:
html += '<option value="%02i" selected="1">%02i</option>' % (hour, showHour)
else:
html += '<option value="%02i">%02i</option>' % (hour, showHour)
html += '</select>'
html += ':'
html += '<select name="%s_min" id="%s_min">' % (self.name, self.name)
for mint in xrange(0, 60, 5):
# In case we get a value that isn't a multiple of five (shouldn't happen)
if mint == (defMin / 5) * 5:
html += '<option value="%02i" selected="1">%02i</option>' % (mint, mint)
else:
html += '<option value="%02i">%02i</option>' % (mint, mint)
html += '</select>\n'
if ampm:
html += '<select name="%s_ampm" id="%s_ampm">' % (self.name, self.name)
for ampmTxt in ["am", "pm"]:
if ampmTxt == defAMPM:
html += '<option value="%s" selected="1">%s</option>' % (ampmTxt, ampmTxt.upper())
else:
html += '<option value="%s">%s</option>' % (ampmTxt, ampmTxt.upper())
html += '</select>\n'
return html
def ParseInput(self, form):
try:
hourS = form[self.name + "_hour"].value
mintS = form[self.name + "_min"].value
except KeyError:
return (False, None, "You must specify a time")
try:
ampm = form[self.name + "_ampm"].value
except KeyError:
ampm = None
try:
hour = int(hourS)
mint = int(mintS)
except ValueError:
return (False, None, "Invalid time")
if ampm == "pm":
hour += 12
if hour < 0 or hour > 23 or mint < 0 or mint > 59:
return (False, None, "Invalid time")
return (True, (hour, mint, 0), "")
class DateWidget(Widget):
"""
Allows user to select a date.
"""
monthNames = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
shortMonthNames = [name[:3] for name in monthNames]
def __init__(self, parent, name, value = None):
super(self.__class__, self).__init__(parent, name, value)
if not self.value:
self.value = time.localtime()[:3]
def GetHTML(self, form = None):
if form:
defYear = int(form.get(self.name + "_year", ""))
defMonth = int(form.get(self.name + "_month", ""))
defDay = int(form.get(self.name + "_day", ""))
else:
defYear, defMonth, defDay = self.value
html = '<select name="%s_day" id="%s_day">\n' % (self.name, self.name)
for day in xrange(1, 32):
if day == defDay:
html += ' <option value="%i" selected="1">%i</option>\n' % (day, day)
else:
html += ' <option value="%i">%i</option>\n' % (day, day)
html += '</select>\n'
html += '<select name="%s_month" id="%s_month">\n' % (self.name, self.name)
for i in xrange(len(self.monthNames)):
monthName = self.monthNames[i]
if i + 1 == defMonth:
html += ' <option value="%i" selected="1">%s</option>\n' % (i + 1, monthName)
else:
html += ' <option value="%i">%s</option>\n' % (i + 1, monthName)
html += '</select>\n'
firstYear = time.gmtime(time.time() - 24 * 3600)[0]
html += '<select name="%s_year" id="%s_year">\n' % (self.name, self.name)
for year in xrange(firstYear, firstYear + 2):
if year == defYear:
html += ' <option value="%i" selected="1">%04i</option>\n' % (year, year)
else:
html += ' <option value="%i">%04i</option>\n' % (year, year)
html += '</select>\n'
return html
def ParseInput(self, form):
try:
dayS = form[self.name + "_day"].value
monthS = form[self.name + "_month"].value
yearS = form[self.name + "_year"].value
except KeyError:
return (False, None, "You must specify a date")
try:
day = int(dayS)
month = int(monthS)
year = int(yearS)
except ValueError:
return (False, None, "Invalid date")
if day < 1 or day > 31 or month < 1 or month > 12:
return (False, None, "Invalid date")
return (True, (year, month, day), "")
class SelectWidget(Widget):
def __init__(self, parent, name, value = None, options = []):
super(self.__class__, self).__init__(parent, name, value)
self.options = options
def GetHTML(self, form = None):
if form:
selected = form.get(self.name, None)
else:
selected = self.value
html = '<select name="%s" id="%s">\n' % (self.name, self.name)
for option, label in self.options:
if option == selected:
html += ' <option value="%s" selected="1">%s</option>\n' % (option, label)
else:
html += ' <option value="%s">%s</option>\n' % (option, label)
html += '</select>\n'
return html
def ParseInput(self, form):
return (True, form[self.name].value, "")
class RadioWidget(Widget):
def __init__(self, parent, name, value = None, options = []):
super(self.__class__, self).__init__(parent, name, value)
self.options = options
self.value = value
def GetHTML(self, form = None):
if form:
selected = form.get(self.name, "")
else:
if self.value == None:
selected = self.options[0][0]
else:
selected = self.value
inputs = []
for option, label in self.options:
if option == selected:
inputs.append('<input type="radio" name="%s" value="%s" checked="1" />%s\n' % (self.name, option, label))
else:
inputs.append('<input type="radio" name="%s" value="%s" />%s\n' % (self.name, option, label))
return "\n<br/>".join(inputs)
def ParseInput(self, form):
return (True, form[self.name].value, "")
class TextWidget(Widget):
"""
A simple one-line or multi-line textbox widget
"""
def __init__(self, req, name, value = "", required = False, errMsg = "Field is required", maxLen = 64, cols = 20, rows = 1):
super(TextWidget, self).__init__(req, name, value = value)
self.required = required
self.errMsg = errMsg
self.maxLen = maxLen
self.cols = cols
self.rows = rows
def GetHTML(self, form = None):
EH = andp.view.web.EH
if form:
try:
value = form[self.name].value
except KeyError:
value = self.value
else:
value = self.value
if self.rows > 1:
return '<textarea name="%s" cols="%i" rows="%i">%s</textarea>' % (self.name, self.cols, self.rows, EH(value))
else:
return '<input type="text" name="%s" value="%s" size="%i" />' % (self.name, EH(value), self.cols)
def ParseInput(self, form):
try:
value = form[self.name].value
except KeyError:
value = ""
if self.required and not value:
return (False, None, self.errMsg)
if len(value) > self.maxLen:
return (False, None, 'Too long (max %i characters)' % self.maxLen)
return (True, value, "")
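# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module).  Running it requires the andp/mod_python environment imported at
# the top of this file; the field name and values below are made up.
if __name__ == '__main__':
    widget = TextWidget(None, "title", value = "Untitled", required = True,
                        errMsg = "A title is required", maxLen = 32)
    print widget.GetHTML()
    # ParseInput() normally receives a mod_python FieldStorage; any mapping
    # of name -> object exposing a .value attribute behaves the same here.
    class FakeField(object):
        def __init__(self, value):
            self.value = value
    ok, parsed, message = widget.ParseInput({"title": FakeField("My booking")})
    print ok, parsed, message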
| gpl-2.0 |
s0enke/boto | tests/integration/cloudformation/test_cert_verification.py | 126 | 1588 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on all service endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.cloudformation
class CloudFormationCertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
cloudformation = True
regions = boto.cloudformation.regions()
def sample_service_call(self, conn):
conn.describe_stacks()
| mit |
GDGND/evm | allauth/account/auth_backends.py | 57 | 2101 | from django.contrib.auth.backends import ModelBackend
from ..utils import get_user_model
from .utils import filter_users_by_email
from .app_settings import AuthenticationMethod
from . import app_settings
class AuthenticationBackend(ModelBackend):
def authenticate(self, **credentials):
ret = None
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
ret = self._authenticate_by_email(**credentials)
elif app_settings.AUTHENTICATION_METHOD \
== AuthenticationMethod.USERNAME_EMAIL:
ret = self._authenticate_by_email(**credentials)
if not ret:
ret = self._authenticate_by_username(**credentials)
else:
ret = self._authenticate_by_username(**credentials)
return ret
def _authenticate_by_username(self, **credentials):
username_field = app_settings.USER_MODEL_USERNAME_FIELD
username = credentials.get('username')
password = credentials.get('password')
User = get_user_model()
if not username_field or username is None or password is None:
return None
try:
# Username query is case insensitive
query = {username_field+'__iexact': username}
user = User.objects.get(**query)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def _authenticate_by_email(self, **credentials):
# Even though allauth will pass along `email`, other apps may
# not respect this setting. For example, when using
# django-tastypie basic authentication, the login is always
# passed as `username`. So let's place nice with other apps
# and use username as fallback
User = get_user_model()
email = credentials.get('email', credentials.get('username'))
if email:
for user in filter_users_by_email(email):
if user.check_password(credentials["password"]):
return user
return None
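# ---------------------------------------------------------------------------
# Illustrative configuration sketch (added for clarity; not part of the
# original module).  To make Django use this backend it is normally listed
# in the project's settings; the dotted path below assumes the default
# "allauth" package layout.
#
# AUTHENTICATION_BACKENDS = (
#     "django.contrib.auth.backends.ModelBackend",
#     "allauth.account.auth_backends.AuthenticationBackend",
# )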
| mit |
ftomassetti/intellij-community | python/lib/Lib/site-packages/django/conf/locale/no/formats.py | 685 | 1657 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
'%Y-%m-%d', # '2006-10-25',
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| apache-2.0 |
SebastianLloret/CSCI-1310 | Assignments/Assignment8_Lloret/Assignment8_Lloret.py | 1 | 1401 | '''
Name: Sebastian Lloret
Recitation TA: Brennan Mcconnell
Assignment #: 8
'''
# Used to properly break the file into rows
import csv
def CreateDictionary(fileName):
slangDictionary = {}
    # The with statement ensures the file is closed even if exceptions are thrown.
    # "rU" is used for universal newline support since textToEnglish is
    # formatted with \r line endings rather than \n.
with open(fileName, "rU") as f:
reader = csv.reader(f)
for row in reader:
            # Row is an n-element list where n is the number of columns in each row
# ex. Row = [slang, translation]
slangDictionary[row[0]] = row[1]
return slangDictionary
if __name__ == "__main__":
# Call the function above
slangDictionary = CreateDictionary("textToEnglish.csv")
playing = True
# Grab user input(s)
    while playing:
userInput = raw_input("Enter text abbreviations separated by spaces, or q to quit.\n")
if userInput == "q":
raise SystemExit
# Split the userInput at every space
arrayInputs = userInput.split()
# For every element in the array of inputs, check if we have the translation
for element in arrayInputs:
if element in slangDictionary:
print slangDictionary[element]
else:
print "NF"
| gpl-3.0 |
ewindisch/nova | nova/api/openstack/compute/contrib/console_output.py | 5 | 3746 | # Copyright 2011 OpenStack Foundation
# Copyright 2011 Grid Dynamics
# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'console_output')
class ConsoleOutputController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ConsoleOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('os-getConsoleOutput')
def get_console_output(self, req, id, body):
"""Get text console output."""
context = req.environ['nova.context']
authorize(context)
try:
instance = self.compute_api.get(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Instance not found'))
try:
length = body['os-getConsoleOutput'].get('length')
except (TypeError, KeyError):
raise webob.exc.HTTPBadRequest(_('os-getConsoleOutput malformed '
'or missing from request body'))
if length is not None:
try:
# NOTE(maurosr): cast length into a string before cast into an
# integer to avoid thing like: int(2.5) which is 2 instead of
# raise ValueError like it would when we try int("2.5"). This
# can be removed once we have api validation landed.
int(str(length))
except ValueError:
raise webob.exc.HTTPBadRequest(_('Length in request body must '
'be an integer value'))
try:
output = self.compute_api.get_console_output(context,
instance,
length)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Unable to get console'))
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError:
msg = _("Unable to get console log, functionality not implemented")
raise webob.exc.HTTPNotImplemented(explanation=msg)
# XML output is not correctly escaped, so remove invalid characters
remove_re = re.compile('[\x00-\x08\x0B-\x1F]')
output = remove_re.sub('', output)
return {'output': output}
class Console_output(extensions.ExtensionDescriptor):
"""Console log output support, with tailing ability."""
name = "ConsoleOutput"
alias = "os-console-output"
namespace = ("http://docs.openstack.org/compute/ext/"
"os-console-output/api/v2")
updated = "2011-12-08T00:00:00+00:00"
def get_controller_extensions(self):
controller = ConsoleOutputController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
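# Hedged illustration (not part of the Nova extension): how the two sanitation
# steps above behave in isolation. `re` is already imported at module scope.
if __name__ == '__main__':
    remove_re = re.compile('[\x00-\x08\x0B-\x1F]')
    print(remove_re.sub('', 'console\x00 output\x1f line'))  # control bytes stripped
    for length in ('50', '2.5'):
        try:
            print(int(str(length)))          # '50' is accepted
        except ValueError:
            print('rejected: %s' % length)   # '2.5' raises, unlike int(2.5) == 2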
| apache-2.0 |
eahneahn/free | djangoproject/core/urls/__init__.py | 1 | 4601 | from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView, RedirectView
from django.conf import settings
#from django.views.generic.simple import redirect_to, direct_to_template
from django.shortcuts import render,redirect
urlpatterns = patterns('core.views.main_views',
url(r'^$', 'home'),
url(r'^home/$', RedirectView.as_view(url='/', permanent=True)),
url(r'^toggle_layout/$', 'toggle_layout'),
url(r'^stats/$', 'stats'),
url(r'^admail/$', 'admail'),
url(r'^mailtest/$', 'mailtest'),
url(r'^about/$', redirect, {'url': 'http://blog.freedomsponsors.org/about/'}),
url(r'^faq/$', redirect, {'url': 'http://blog.freedomsponsors.org/faq/'}),
url(r'^dev/$', redirect, {'url': 'http://blog.freedomsponsors.org/developers/'}),
url(r'^login/$', 'login'),
url(r'^logout/$', 'logout'),
url(r'^jslic$', render, {'template': 'core/jslic.html'}),
)
urlpatterns += patterns('core.views.issue_views',
url(r'^myissues/$', 'myissues'),
url(r'^issue/$', 'listIssues'),
url(r'^issue/rss$', 'listIssuesFeed'),
url(r'^issue/sponsor/submit$', 'sponsorIssue'),
url(r'^issue/sponsor$', 'addIssueForm'),
url(r'^issue/add/submit$', 'addIssue'),
url(r'^issue/kickstart/submit$', 'kickstartIssue'),
url(r'^issue/add/$', 'addIssueForm'),
url(r'^issue/edit/submit$', 'editIssue'),
url(r'^offer/(?P<offer_id>\d+)/pay$', 'payOfferForm'),
url(r'^offer/pay/submit$', 'payOffer'),
url(r'^issue/(?P<issue_id>\d+)/$', 'viewIssue'),
url(r'^issue/(?P<issue_id>\d+)/.*$', 'viewIssue'),
# url(r'^offer/(?P<offer_id>\d+)/$', 'viewOffer'),
# url(r'^offer/(?P<offer_id>\d+)/.*$', 'viewOffer'),
url(r'^offer/revoke/submit$', 'revokeOffer'),
url(r'^offer/edit/submit$', 'editOffer'),
url(r'^solution/add/submit$', 'addSolution'),
url(r'^solution/abort/submit$', 'abortSolution'),
url(r'^solution/resolve/submit$', 'resolveSolution'),
)
urlpatterns += patterns('',
url(r'^project/$', RedirectView.as_view(url='/project/', permanent=True)),
url(r'^project/(?P<project_id>\d+)/$', RedirectView.as_view(url='/project/%(project_id)s/', permanent=True)),
url(r'^project/(?P<project_id>\d+)/edit$', RedirectView.as_view(url='/project/%(project_id)s/edit', permanent=True)),
)
urlpatterns += patterns('core.views.comment_views',
url(r'^issue/comment/add/submit$', 'addIssueComment'),
url(r'^issue/comment/edit/submit$', 'editIssueComment'),
url(r'^issue/comment/(?P<comment_id>\d+)/history$', 'viewIssueCommentHistory'),
url(r'^offer/comment/(?P<comment_id>\d+)/history$', 'viewOfferCommentHistory'),
)
urlpatterns += patterns('', # TODO: how to use reverse_lazy here?
url(r'^watch/issue/(?P<issue_id>\d+)$', RedirectView.as_view(url='/issue/%(issue_id)s/watch', permanent=True)),
url(r'^unwatch/issue/(?P<issue_id>\d+)$', RedirectView.as_view(url='/issue/%(issue_id)s/unwatch', permanent=True)),
url(r'^watch/offer/(?P<offer_id>\d+)$', RedirectView.as_view(url='/offer/%(offer_id)s/watch', permanent=True)),
url(r'^unwatch/offer/(?P<offer_id>\d+)$', RedirectView.as_view(url='/offer/%(offer_id)s/unwatch', permanent=True)),
)
urlpatterns += patterns('core.views.paypal_views',
url(r'^paypal/cancel$', 'paypalCancel'),
url(r'^paypal/return$', 'paypalReturn'),
url(r'^paypal/'+settings.PAYPAL_IPNNOTIFY_URL_TOKEN+'$', 'paypalIPN'),
)
urlpatterns += patterns('core.views.bitcoin_views',
url(r'^bitcoin/'+settings.BITCOIN_IPNNOTIFY_URL_TOKEN+'$', 'bitcoinIPN'),
)
urlpatterns += patterns('',
url(r'^user/$', RedirectView.as_view(url='/user/', permanent=True)),
url(r'^user/(?P<user_id>\d+)/$', RedirectView.as_view(url='/user/%(user_id)s/', permanent=True)),
url(r'^user/(?P<user_id>\d+)/(?P<user_slug>.*)$', RedirectView.as_view(url='/user/%(user_id)s/%(user_slug)s', permanent=True)),
url(r'^user/edit$', RedirectView.as_view(url='/user/edit', permanent=True)),
)
urlpatterns += patterns('core.views.json_views',
url(r'^json/project$', 'project'),
url(r'^json/by_issue_url$', 'by_issue_url'),
url(r'^json/get_offers$', 'get_offers'),
url(r'^json/list_issue_cards', 'list_issue_cards'),
url(r'^json/add_tag', 'add_tag'),
url(r'^json/remove_tag', 'remove_tag'),
url(r'^json/latest_activity', 'latest_activity'),
url(r'^json/toggle_watch', 'toggle_watch'),
)
# urlpatterns += patterns('core.jiraviews',
# url(r'^issue/sponsor_jira$', 'sponsorJiraForm'),
# )
urlpatterns += patterns('',
url(r'^feedback$', RedirectView.as_view(url='/feedback', permanent=True)),
)
| agpl-3.0 |
godiard/speak | aiml/DefaultSubs.py | 9 | 3590 | """This file contains the default (English) substitutions for the
PyAIML kernel. These substitutions may be overridden by using the
Kernel.loadSubs(filename) method. The filename specified should refer
to a Windows-style INI file with the following format:
# lines that start with '#' are comments
# The 'gender' section contains the substitutions performed by the
# <gender> AIML tag, which swaps masculine and feminine pronouns.
[gender]
he = she
she = he
# and so on...
# The 'person' section contains the substitutions performed by the
# <person> AIML tag, which swaps 1st and 2nd person pronouns.
[person]
I = you
you = I
# and so on...
# The 'person2' section contains the substitutions performed by
# the <person2> AIML tag, which swaps 1st and 3rd person pronouns.
[person2]
I = he
he = I
# and so on...
# The 'normal' section contains substitutions run on every input
# string passed into Kernel.respond(). It's mainly used to
# correct common misspellings, and to convert contractions
# ("WHAT'S") into a format that will match an AIML pattern ("WHAT
# IS").
[normal]
what's = what is
"""
defaultGender = {
# masculine -> feminine
"he": "she",
"him": "her",
"his": "her",
"himself": "herself",
# feminine -> masculine
"she": "he",
"her": "him",
"hers": "his",
"herself": "himself",
}
defaultPerson = {
# 1st->3rd (masculine)
"I": "he",
"me": "him",
"my": "his",
"mine": "his",
"myself": "himself",
# 3rd->1st (masculine)
"he":"I",
"him":"me",
"his":"my",
"himself":"myself",
# 3rd->1st (feminine)
"she":"I",
"her":"me",
"hers":"mine",
"herself":"myself",
}
defaultPerson2 = {
# 1st -> 2nd
"I": "you",
"me": "you",
"my": "your",
"mine": "yours",
"myself": "yourself",
# 2nd -> 1st
"you": "me",
"your": "my",
"yours": "mine",
"yourself": "myself",
}
# TODO: this list is far from complete
defaultNormal = {
"wanna": "want to",
"gonna": "going to",
"I'm": "I am",
"I'd": "I would",
"I'll": "I will",
"I've": "I have",
"you'd": "you would",
"you're": "you are",
"you've": "you have",
"you'll": "you will",
"he's": "he is",
"he'd": "he would",
"he'll": "he will",
"she's": "she is",
"she'd": "she would",
"she'll": "she will",
"we're": "we are",
"we'd": "we would",
"we'll": "we will",
"we've": "we have",
"they're": "they are",
"they'd": "they would",
"they'll": "they will",
"they've": "they have",
"y'all": "you all",
"can't": "can not",
"cannot": "can not",
"couldn't": "could not",
"wouldn't": "would not",
"shouldn't": "should not",
"isn't": "is not",
"ain't": "is not",
"don't": "do not",
"aren't": "are not",
"won't": "will not",
"weren't": "were not",
"wasn't": "was not",
"didn't": "did not",
"hasn't": "has not",
"hadn't": "had not",
"haven't": "have not",
"where's": "where is",
"where'd": "where did",
"where'll": "where will",
"who's": "who is",
"who'd": "who did",
"who'll": "who will",
"what's": "what is",
"what'd": "what did",
"what'll": "what will",
"when's": "when is",
"when'd": "when did",
"when'll": "when will",
"why's": "why is",
"why'd": "why did",
"why'll": "why will",
"it's": "it is",
"it'd": "it would",
"it'll": "it will",
    }
| gpl-3.0 |
sanyaade-g2g-repos/key-mon | src/keymon/shaped_window.py | 15 | 3172 | #!/usr/bin/python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a shaped window to show mouse events.
Thanks to mathias.gumz for the original code.
"""
import gobject
import gtk
import lazy_pixbuf_creator
class ShapedWindow(gtk.Window):
"""Create a window shaped as fname."""
def __init__(self, fname, scale=1.0, timeout=0.2):
gtk.Window.__init__(self)
self.connect('size-allocate', self._on_size_allocate)
self.set_decorated(False)
self.set_keep_above(True)
self.set_accept_focus(False)
self.scale = scale
self.shown = False
self.timeout = timeout
self.timeout_timer = None
self.name_fnames = {
'mouse' : [fname],
}
self.pixbufs = lazy_pixbuf_creator.LazyPixbufCreator(self.name_fnames,
self.scale)
self.pixbuf = self.pixbufs.get('mouse')
self.resize(self.pixbuf.get_width(), self.pixbuf.get_height())
# a pixmap widget to contain the pixmap
self.image = gtk.Image()
bitmap, self.mask = self.pixbuf.render_pixmap_and_mask()
self.image.set_from_pixmap(bitmap, self.mask)
self.image.show()
self.add(self.image)
def _on_size_allocate(self, win, unused_allocation):
"""Called when first allocated."""
# Set the window shape
win.shape_combine_mask(self.mask, 0, 0)
win.set_property('skip-taskbar-hint', True)
if not win.is_composited():
print 'Unable to fade the window'
else:
win.set_opacity(0.5)
def center_on_cursor(self, x=None, y=None):
if x is None or y is None:
root = gtk.gdk.screen_get_default().get_root_window()
x, y, _ = root.get_pointer()
w, h = self.get_size()
new_x, new_y = x - w/2, y - h/2
pos = self.get_position()
if pos[0] != new_x or pos[1] != new_y:
self.move(new_x, new_y)
self.show()
def show(self):
"""Show this mouse indicator and ignore awaiting fade away request."""
if self.timeout_timer and self.shown:
# There is a fade away request, ignore it
gobject.source_remove(self.timeout_timer)
self.timeout_timer = None
# This method only is called when mouse is pressed, so there will be a
# release and fade_away call, no need to set up another timer.
super(ShapedWindow, self).show()
def maybe_show(self):
if self.shown or not self.timeout_timer:
return
self.shown = True
self.show()
  def fade_away(self):
    """Make the window fade away after a short timeout."""
# TODO this isn't doing any fading out
self.shown = False
self.timeout_timer = gobject.timeout_add(int(self.timeout * 1000), self.hide)
| apache-2.0 |
4eek/edx-platform | lms/djangoapps/certificates/views/support.py | 52 | 5649 | """
Certificate end-points used by the student support UI.
See lms/djangoapps/support for more details.
"""
import logging
from functools import wraps
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseServerError
)
from django.views.decorators.http import require_GET, require_POST
from django.db.models import Q
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from student.models import User, CourseEnrollment
from courseware.access import has_access
from util.json_request import JsonResponse
from certificates import api
log = logging.getLogger(__name__)
def require_certificate_permission(func):
"""
View decorator that requires permission to view and regenerate certificates.
"""
@wraps(func)
def inner(request, *args, **kwargs): # pylint:disable=missing-docstring
if has_access(request.user, "certificates", "global"):
return func(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return inner
@require_GET
@require_certificate_permission
def search_by_user(request):
"""
Search for certificates for a particular user.
Supports search by either username or email address.
Arguments:
request (HttpRequest): The request object.
Returns:
JsonResponse
Example Usage:
GET /certificates/[email protected]
Response: 200 OK
Content-Type: application/json
[
            {
                "username": "bob",
                "course_key": "edX/DemoX/Demo_Course",
                "type": "verified",
                "status": "downloadable",
                "download_url": "http://www.example.com/cert.pdf",
                "grade": "0.98",
                "created": "2015-07-31T00:00:00Z",
                "modified": "2015-07-31T00:00:00Z"
            }
]
"""
query = request.GET.get("query")
if not query:
return JsonResponse([])
try:
user = User.objects.get(Q(email=query) | Q(username=query))
except User.DoesNotExist:
return JsonResponse([])
certificates = api.get_certificates_for_user(user.username)
for cert in certificates:
cert["course_key"] = unicode(cert["course_key"])
cert["created"] = cert["created"].isoformat()
cert["modified"] = cert["modified"].isoformat()
return JsonResponse(certificates)
def _validate_regen_post_params(params):
"""
Validate request POST parameters to the regenerate certificates end-point.
Arguments:
params (QueryDict): Request parameters.
Returns: tuple of (dict, HttpResponse)
"""
# Validate the username
try:
username = params.get("username")
user = User.objects.get(username=username)
except User.DoesNotExist:
msg = _("User {username} does not exist").format(username=username)
return None, HttpResponseBadRequest(msg)
# Validate the course key
try:
course_key = CourseKey.from_string(params.get("course_key"))
except InvalidKeyError:
msg = _("{course_key} is not a valid course key").format(course_key=params.get("course_key"))
return None, HttpResponseBadRequest(msg)
return {"user": user, "course_key": course_key}, None
@require_POST
@require_certificate_permission
def regenerate_certificate_for_user(request):
"""
Regenerate certificates for a user.
This is meant to be used by support staff through the UI in lms/djangoapps/support
Arguments:
request (HttpRequest): The request object
Returns:
HttpResponse
Example Usage:
POST /certificates/regenerate
* username: "bob"
* course_key: "edX/DemoX/Demo_Course"
Response: 200 OK
"""
# Check the POST parameters, returning a 400 response if they're not valid.
params, response = _validate_regen_post_params(request.POST)
if response is not None:
return response
# Check that the course exists
course = modulestore().get_course(params["course_key"])
if course is None:
msg = _("The course {course_key} does not exist").format(course_key=params["course_key"])
return HttpResponseBadRequest(msg)
# Check that the user is enrolled in the course
if not CourseEnrollment.is_enrolled(params["user"], params["course_key"]):
msg = _("User {username} is not enrolled in the course {course_key}").format(
username=params["user"].username,
course_key=params["course_key"]
)
return HttpResponseBadRequest(msg)
# Attempt to regenerate certificates
try:
api.regenerate_user_certificates(params["user"], params["course_key"], course=course)
except: # pylint: disable=bare-except
# We are pessimistic about the kinds of errors that might get thrown by the
# certificates API. This may be overkill, but we're logging everything so we can
# track down unexpected errors.
log.exception(
"Could not regenerate certificates for user %s in course %s",
params["user"].id,
params["course_key"]
)
return HttpResponseServerError(_("An unexpected error occurred while regenerating certificates."))
log.info(
"Started regenerating certificates for user %s in course %s from the support page.",
params["user"].id, params["course_key"]
)
return HttpResponse(200)
| agpl-3.0 |
realms-team/solmanager | libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/configuration.py | 3 | 7415 | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import base64
import urllib3
try:
import httplib
except ImportError:
# for python3
import http.client as httplib
import sys
import logging
from six import iteritems
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
@singleton
class Configuration(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Ref: https://github.com/swagger-api/swagger-codegen
Do not edit the class manually.
"""
def __init__(self):
"""
Constructor
"""
# Default Base url
self.host = "https://localhost/manager/v1"
# Default api client
self.api_client = None
# Temp file folder for downloading files
self.temp_folder_path = None
# Authentication Settings
# dict to store API key(s)
self.api_key = {}
# dict to store API prefix (e.g. Bearer)
self.api_key_prefix = {}
# Username for HTTP basic authentication
self.username = ""
# Password for HTTP basic authentication
self.password = ""
# Logging Settings
self.logger = {}
self.logger["package_logger"] = logging.getLogger("vmanager")
self.logger["urllib3_logger"] = logging.getLogger("urllib3")
# Log format
self.logger_format = '%(asctime)s %(levelname)s %(message)s'
# Log stream handler
self.logger_stream_handler = None
# Log file handler
self.logger_file_handler = None
# Debug file location
self.logger_file = None
# Debug switch
self.debug = False
# SSL/TLS verification
# Set this to false to skip verifying SSL certificate when calling API from https server.
self.verify_ssl = True
# Set this to customize the certificate file to verify the peer.
self.ssl_ca_cert = None
# client certificate file
self.cert_file = None
# client key file
self.key_file = None
@property
def logger_file(self):
"""
Gets the logger_file.
"""
return self.__logger_file
@logger_file.setter
def logger_file(self, value):
"""
Sets the logger_file.
If the logger_file is None, then add stream handler and remove file handler.
Otherwise, add file handler and remove stream handler.
:param value: The logger_file path.
:type: str
"""
self.__logger_file = value
if self.__logger_file:
# If set logging file,
# then add file handler and remove stream handler.
self.logger_file_handler = logging.FileHandler(self.__logger_file)
self.logger_file_handler.setFormatter(self.logger_formatter)
for _, logger in iteritems(self.logger):
logger.addHandler(self.logger_file_handler)
if self.logger_stream_handler:
logger.removeHandler(self.logger_stream_handler)
else:
# If not set logging file,
# then add stream handler and remove file handler.
self.logger_stream_handler = logging.StreamHandler()
self.logger_stream_handler.setFormatter(self.logger_formatter)
for _, logger in iteritems(self.logger):
logger.addHandler(self.logger_stream_handler)
if self.logger_file_handler:
logger.removeHandler(self.logger_file_handler)
@property
def debug(self):
"""
Gets the debug status.
"""
return self.__debug
@debug.setter
def debug(self, value):
"""
Sets the debug status.
:param value: The debug status, True or False.
:type: bool
"""
self.__debug = value
if self.__debug:
# if debug status is True, turn on debug logging
for _, logger in iteritems(self.logger):
logger.setLevel(logging.DEBUG)
# turn on httplib debug
httplib.HTTPConnection.debuglevel = 1
else:
# if debug status is False, turn off debug logging,
# setting log level to default `logging.WARNING`
for _, logger in iteritems(self.logger):
logger.setLevel(logging.WARNING)
# turn off httplib debug
httplib.HTTPConnection.debuglevel = 0
@property
def logger_format(self):
"""
Gets the logger_format.
"""
return self.__logger_format
@logger_format.setter
def logger_format(self, value):
"""
Sets the logger_format.
The logger_formatter will be updated when sets logger_format.
:param value: The format string.
:type: str
"""
self.__logger_format = value
self.logger_formatter = logging.Formatter(self.__logger_format)
def get_api_key_with_prefix(self, identifier):
"""
Gets API key (with prefix if set).
:param identifier: The identifier of apiKey.
:return: The token for api key authentication.
"""
if self.api_key.get(identifier) and self.api_key_prefix.get(identifier):
return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]
elif self.api_key.get(identifier):
return self.api_key[identifier]
def get_basic_auth_token(self):
"""
Gets HTTP basic authentication header (string).
:return: The token for basic HTTP authentication.
"""
return urllib3.util.make_headers(basic_auth=self.username + ':' + self.password)\
.get('authorization')
def auth_settings(self):
"""
Gets Auth Settings dict for api client.
:return: The Auth Settings information dict.
"""
return {
'dust_basic':
{
'type': 'basic',
'in': 'header',
'key': 'Authorization',
'value': self.get_basic_auth_token()
},
}
def to_debug_report(self):
"""
Gets the essential information for debugging.
:return: The report for debugging.
"""
return "Python SDK Debug Report:\n"\
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 1.0.0\n"\
"SDK Package Version: 0.2".\
format(env=sys.platform, pyversion=sys.version)
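# Hedged usage sketch (not part of the generated SDK): Configuration is wrapped by
# the singleton decorator above, so every call returns the same object and settings
# such as API keys are effectively process-wide. The key name below is made up.
if __name__ == '__main__':
    config_a = Configuration()
    config_b = Configuration()
    assert config_a is config_b
    config_a.api_key['example_key'] = 'secret-token'
    config_a.api_key_prefix['example_key'] = 'Token'
    print(config_b.get_api_key_with_prefix('example_key'))  # -> 'Token secret-token'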
| bsd-3-clause |
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_Cisco_IOS_XE_bgp_oper.py | 1 | 48604 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'BgpAfiSafiEnum' : _MetaInfoEnum('BgpAfiSafiEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'ipv4-mdt':'ipv4_mdt',
'ipv4-multicast':'ipv4_multicast',
'ipv4-unicast':'ipv4_unicast',
'ipv4-mvpn':'ipv4_mvpn',
'ipv4-flowspec':'ipv4_flowspec',
'ipv6-multicast':'ipv6_multicast',
'ipv6-unicast':'ipv6_unicast',
'ipv6-mvpn':'ipv6_mvpn',
'ipv6-flowspec':'ipv6_flowspec',
'l2vpn-vpls':'l2vpn_vpls',
'l2vpn-e-vpn':'l2vpn_e_vpn',
'nsap-unicast':'nsap_unicast',
'rtfilter-unicast':'rtfilter_unicast',
'vpnv4-multicast':'vpnv4_multicast',
'vpnv4-unicast':'vpnv4_unicast',
'vpnv6-unicast':'vpnv6_unicast',
'vpnv6-multicast':'vpnv6_multicast',
'vpnv4-flowspec':'vpnv4_flowspec',
'vpnv6-flowspec':'vpnv6_flowspec',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpOriginCodeEnum' : _MetaInfoEnum('BgpOriginCodeEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'origin-igp':'origin_igp',
'origin-egp':'origin_egp',
'origin-incomplete':'origin_incomplete',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpRpkiStatusEnum' : _MetaInfoEnum('BgpRpkiStatusEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'rpki-valid':'rpki_valid',
'rpki-invalid':'rpki_invalid',
'rpki-not-found':'rpki_not_found',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpModeEnum' : _MetaInfoEnum('BgpModeEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'active':'active',
'passive':'passive',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpRouteOptionEnum' : _MetaInfoEnum('BgpRouteOptionEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'bgp-all-routes':'bgp_all_routes',
'bgp-cidr-only-routes':'bgp_cidr_only_routes',
'bgp-dampened-routes':'bgp_dampened_routes',
'bgp-rib-fail-routes':'bgp_rib_fail_routes',
'bgp-injected-routes':'bgp_injected_routes',
'bgp-pending-routes':'bgp_pending_routes',
'bgp-inconsistent-routes':'bgp_inconsistent_routes',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpFsmStateEnum' : _MetaInfoEnum('BgpFsmStateEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'idle':'idle',
'connect':'connect',
'active':'active',
'opensent':'opensent',
'openconfirm':'openconfirm',
'established':'established',
'nonnegotiated':'nonnegotiated',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpLinkEnum' : _MetaInfoEnum('BgpLinkEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'internal':'internal',
'external':'external',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'TcpFsmStateEnum' : _MetaInfoEnum('TcpFsmStateEnum', 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper',
{
'closed':'closed',
'listen':'listen',
'synsent':'synsent',
'synrcvd':'synrcvd',
'established':'established',
'finwait1':'finwait1',
'finwait2':'finwait2',
'closewait':'closewait',
'lastack':'lastack',
'closing':'closing',
'timewait':'timewait',
}, 'Cisco-IOS-XE-bgp-oper', _yang_ns._namespaces['Cisco-IOS-XE-bgp-oper']),
'BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers',
False,
[
_MetaInfoClassMember('hold-time', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Hold time
''',
'hold_time',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('keepalive-interval', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' keepalive interval
''',
'keepalive_interval',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'negotiated-keepalive-timers',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent',
False,
[
_MetaInfoClassMember('keepalives', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KEEPALIVE messages
''',
'keepalives',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('notifications', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' NOTIFICATION messages
''',
'notifications',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('opens', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' OPEN messages
''',
'opens',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-refreshes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route refresh messages
''',
'route_refreshes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('updates', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' UPDATE messages
''',
'updates',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'sent',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received',
False,
[
_MetaInfoClassMember('keepalives', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' KEEPALIVE messages
''',
'keepalives',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('notifications', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' NOTIFICATION messages
''',
'notifications',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('opens', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' OPEN messages
''',
'opens',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-refreshes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route refresh messages
''',
'route_refreshes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('updates', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' UPDATE messages
''',
'updates',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'received',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.BgpNeighborCounters' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.BgpNeighborCounters',
False,
[
_MetaInfoClassMember('inq-depth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Input Q depth
''',
'inq_depth',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('outq-depth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Output Q depth
''',
'outq_depth',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('received', REFERENCE_CLASS, 'Received' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received',
[], [],
''' ''',
'received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('sent', REFERENCE_CLASS, 'Sent' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent',
[], [],
''' ''',
'sent',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-counters',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.Connection' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.Connection',
False,
[
_MetaInfoClassMember('last-reset', ATTRIBUTE, 'str' , None, None,
[], [],
''' since the peering session was last reset
''',
'last_reset',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'BgpModeEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpModeEnum',
[], [],
''' ''',
'mode',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('reset-reason', ATTRIBUTE, 'str' , None, None,
[], [],
''' The reason for the last reset
''',
'reset_reason',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'TcpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'TcpFsmStateEnum',
[], [],
''' TCP FSM state
''',
'state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-dropped', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of times that a valid session has failed
or been taken down
''',
'total_dropped',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-established', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of times a TCP and BGP connection has been
successfully established
''',
'total_established',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'connection',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.Transport' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.Transport',
False,
[
_MetaInfoClassMember('foreign-host', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('foreign-host', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('foreign-host', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Remote address to which the BGP session has
established
''',
'foreign_host',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('foreign-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Remote port used by the peer for the TCP session
''',
'foreign_port',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('local-host', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('local-host', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('local-host', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Local address used for the TCP session
''',
'local_host',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('local-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Local TCP port used for TCP session
''',
'local_port',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('mss', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Maximum Data segment size
''',
'mss',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('path-mtu-discovery', ATTRIBUTE, 'bool' , None, None,
[], [],
''' ''',
'path_mtu_discovery',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'transport',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity.Sent' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity.Sent',
False,
[
_MetaInfoClassMember('bestpaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as best paths
''',
'bestpaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('current-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Current number of prefixes accepted
''',
'current_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('explicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of times that a prefix has been withdrawn
because it is no longer feasible
''',
'explicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('implicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' number of times that a prefix has been withdrawn
and readvertised
''',
'implicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('multipaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as multipaths
''',
'multipaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total number of prefixes accepted
''',
'total_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'sent',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity.Received' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity.Received',
False,
[
_MetaInfoClassMember('bestpaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as best paths
''',
'bestpaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('current-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Current number of prefixes accepted
''',
'current_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('explicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of times that a prefix has been withdrawn
because it is no longer feasible
''',
'explicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('implicit-withdraw', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' number of times that a prefix has been withdrawn
and readvertised
''',
'implicit_withdraw',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('multipaths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of received prefixes installed as multipaths
''',
'multipaths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Total number of prefixes accepted
''',
'total_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'received',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor.PrefixActivity' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor.PrefixActivity',
False,
[
_MetaInfoClassMember('received', REFERENCE_CLASS, 'Received' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity.Received',
[], [],
''' ''',
'received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('sent', REFERENCE_CLASS, 'Sent' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity.Sent',
[], [],
''' ''',
'sent',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'prefix-activity',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors.Neighbor' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors.Neighbor',
False,
[
_MetaInfoClassMember('afi-safi', REFERENCE_ENUM_CLASS, 'BgpAfiSafiEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpAfiSafiEnum',
[], [],
''' ''',
'afi_safi',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'vrf_name',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('neighbor-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'neighbor_id',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('bgp-neighbor-counters', REFERENCE_CLASS, 'BgpNeighborCounters' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.BgpNeighborCounters',
[], [],
''' ''',
'bgp_neighbor_counters',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-version', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' BGP version being used to communicate with the
remote router
''',
'bgp_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('connection', REFERENCE_CLASS, 'Connection' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.Connection',
[], [],
''' ''',
'connection',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'description',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('installed-prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' number of installed prefixes
''',
'installed_prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('last-read', ATTRIBUTE, 'str' , None, None,
[], [],
''' since BGP last received a message to this neighbor
''',
'last_read',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('last-write', ATTRIBUTE, 'str' , None, None,
[], [],
''' since BGP last sent a message from this neighbor
''',
'last_write',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('link', REFERENCE_ENUM_CLASS, 'BgpLinkEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpLinkEnum',
[], [],
''' ''',
'link',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('negotiated-cap', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Information for bgp neighbor session negotiated
capabilities
''',
'negotiated_cap',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('negotiated-keepalive-timers', REFERENCE_CLASS, 'NegotiatedKeepaliveTimers' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers',
[], [],
''' ''',
'negotiated_keepalive_timers',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefix-activity', REFERENCE_CLASS, 'PrefixActivity' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.PrefixActivity',
[], [],
''' ''',
'prefix_activity',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('session-state', REFERENCE_ENUM_CLASS, 'BgpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpFsmStateEnum',
[], [],
''' ''',
'session_state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('transport', REFERENCE_CLASS, 'Transport' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor.Transport',
[], [],
''' ''',
'transport',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('up-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' How long the bgp session has been up since
the sessioin was established
''',
'up_time',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'neighbor',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.Neighbors' : {
'meta_info' : _MetaInfoClass('BgpState.Neighbors',
False,
[
_MetaInfoClassMember('neighbor', REFERENCE_LIST, 'Neighbor' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors.Neighbor',
[], [],
''' ''',
'neighbor',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'neighbors',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Prefixes' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Prefixes',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'prefixes',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Path' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Path',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'path',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.AsPath' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.AsPath',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'as-path',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.RouteMap' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.RouteMap',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'route-map',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.FilterList' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.FilterList',
False,
[
_MetaInfoClassMember('memory-usage', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total memory usage in byte
''',
'memory_usage',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-entries', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' total prefix entires
''',
'total_entries',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'filter-list',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.Activities' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.Activities',
False,
[
_MetaInfoClassMember('paths', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'paths',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('scan-interval', ATTRIBUTE, 'str' , None, None,
[], [],
''' scan interval in second
''',
'scan_interval',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'activities',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary',
False,
[
_MetaInfoClassMember('id', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'id',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('bgp-version', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' ''',
'bgp_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('input-queue', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'input_queue',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('messages-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'messages_received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('messages-sent', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'messages_sent',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('output-queue', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'output_queue',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes-received', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'prefixes_received',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'BgpFsmStateEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpFsmStateEnum',
[], [],
''' ''',
'state',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('up-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'up_time',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-summary',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries',
False,
[
_MetaInfoClassMember('bgp-neighbor-summary', REFERENCE_LIST, 'BgpNeighborSummary' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary',
[], [],
''' ''',
'bgp_neighbor_summary',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-neighbor-summaries',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies.AddressFamily' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies.AddressFamily',
False,
[
_MetaInfoClassMember('afi-safi', REFERENCE_ENUM_CLASS, 'BgpAfiSafiEnum' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpAfiSafiEnum',
[], [],
''' ''',
'afi_safi',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' ''',
'vrf_name',
'Cisco-IOS-XE-bgp-oper', True),
_MetaInfoClassMember('activities', REFERENCE_CLASS, 'Activities' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Activities',
[], [],
''' BGP activity information
''',
'activities',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('as-path', REFERENCE_CLASS, 'AsPath' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.AsPath',
[], [],
''' ''',
'as_path',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-neighbor-summaries', REFERENCE_CLASS, 'BgpNeighborSummaries' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries',
[], [],
''' Summary of neighbor
''',
'bgp_neighbor_summaries',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('bgp-table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' BGP table version number
''',
'bgp_table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('filter-list', REFERENCE_CLASS, 'FilterList' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.FilterList',
[], [],
''' ''',
'filter_list',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('path', REFERENCE_CLASS, 'Path' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Path',
[], [],
''' ''',
'path',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('prefixes', REFERENCE_CLASS, 'Prefixes' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.Prefixes',
[], [],
''' ''',
'prefixes',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('route-map', REFERENCE_CLASS, 'RouteMap' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily.RouteMap',
[], [],
''' ''',
'route_map',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('router-id', REFERENCE_UNION, 'str' , None, None,
[], [],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False, [
_MetaInfoClassMember('router-id', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('router-id', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' ''',
'router_id',
'Cisco-IOS-XE-bgp-oper', False),
]),
_MetaInfoClassMember('routing-table-version', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Routing table version number
''',
'routing_table_version',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('total-memory', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' ''',
'total_memory',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'address-family',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState.AddressFamilies' : {
'meta_info' : _MetaInfoClass('BgpState.AddressFamilies',
False,
[
_MetaInfoClassMember('address-family', REFERENCE_LIST, 'AddressFamily' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies.AddressFamily',
[], [],
''' ''',
'address_family',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'address-families',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
'BgpState' : {
'meta_info' : _MetaInfoClass('BgpState',
False,
[
_MetaInfoClassMember('address-families', REFERENCE_CLASS, 'AddressFamilies' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.AddressFamilies',
[], [],
''' ''',
'address_families',
'Cisco-IOS-XE-bgp-oper', False),
_MetaInfoClassMember('neighbors', REFERENCE_CLASS, 'Neighbors' , 'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper', 'BgpState.Neighbors',
[], [],
''' ''',
'neighbors',
'Cisco-IOS-XE-bgp-oper', False),
],
'Cisco-IOS-XE-bgp-oper',
'bgp-state',
_yang_ns._namespaces['Cisco-IOS-XE-bgp-oper'],
'ydk.models.cisco_ios_xe.Cisco_IOS_XE_bgp_oper'
),
},
}
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters.Sent']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters.Received']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity.Sent']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity.Received']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.NegotiatedKeepaliveTimers']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.BgpNeighborCounters']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.Connection']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.Transport']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor.PrefixActivity']['meta_info'].parent =_meta_table['BgpState.Neighbors.Neighbor']['meta_info']
_meta_table['BgpState.Neighbors.Neighbor']['meta_info'].parent =_meta_table['BgpState.Neighbors']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries.BgpNeighborSummary']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Prefixes']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Path']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.AsPath']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.RouteMap']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.FilterList']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.Activities']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily.BgpNeighborSummaries']['meta_info'].parent =_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info']
_meta_table['BgpState.AddressFamilies.AddressFamily']['meta_info'].parent =_meta_table['BgpState.AddressFamilies']['meta_info']
_meta_table['BgpState.Neighbors']['meta_info'].parent =_meta_table['BgpState']['meta_info']
_meta_table['BgpState.AddressFamilies']['meta_info'].parent =_meta_table['BgpState']['meta_info']
| apache-2.0 |
tcmitchell/geni-tools | src/gcf/geni/am/aggregate.py | 3 | 3386 | #----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from __future__ import absolute_import
from .resource import Resource
class Aggregate(object):
def __init__(self):
self.resources = []
self.containers = {} # of resources, not slivers
def add_resources(self, resources):
self.resources.extend(resources)
def catalog(self, container=None):
if container:
if container in self.containers:
return self.containers[container]
else:
return []
else:
return self.resources
def allocate(self, container, resources):
if container not in self.containers:
self.containers[container] = []
for r in resources:
self.containers[container].append(r)
def deallocate(self, container, resources):
        if container and container not in self.containers:
            # Be flexible: if a container is specified but unknown,
            # ignore the call
            return
if container and resources:
# deallocate the given resources from the container
for r in resources:
self.containers[container].remove(r)
elif container:
# deallocate all the resources in the container
container_resources = list(self.containers[container])
for r in container_resources:
self.containers[container].remove(r)
elif resources:
# deallocate the resources from their container
for r in resources:
for c in self.containers.values():
if r in c:
c.remove(r)
        # Finally, check whether any container is now empty; if so, delete it.
        # Take a snapshot of the keys because the dict is modified inside
        # the loop.
        allkeys = list(self.containers.keys())
for k in allkeys:
if not self.containers[k]:
del self.containers[k]
def stop(self, container):
# Mark the resources as 'SHUTDOWN'
if container in self.containers:
for r in self.containers[container]:
r.status = Resource.STATUS_SHUTDOWN
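# --- Usage sketch (illustrative, not part of the upstream module) ---
# A minimal walk-through of the Aggregate API, assuming Resource objects
# have already been created elsewhere in the package; `res_a`, `res_b`
# and the container name below are hypothetical placeholders.
#
#     agg = Aggregate()
#     agg.add_resources([res_a, res_b])       # advertise available resources
#     agg.allocate('slice-urn-1', [res_a])    # bind res_a to a container
#     agg.catalog('slice-urn-1')              # -> [res_a]
#     agg.catalog()                           # -> all advertised resources
#     agg.deallocate('slice-urn-1', None)     # empty (and so remove) the container
#     agg.stop('slice-urn-1')                 # no-op: the container no longer exists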
| mit |
nicememory/pie | pyglet/pyglet/gl/glxext_mesa.py | 46 | 2050 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''This file is currently hand-coded; I don't have a MESA header file to
build from.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
glXSwapIntervalMESA = _link_function('glXSwapIntervalMESA', c_int, [c_int], 'MESA_swap_control')
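# Illustrative usage (not part of the original file): with a current GLX
# context, a non-zero swap interval enables vsync via MESA_swap_control.
#
#     from pyglet.gl.glxext_mesa import glXSwapIntervalMESA
#     glXSwapIntervalMESA(1)   # swap buffers at most once per vertical retrace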
| apache-2.0 |
jpmpentwater/cvxpy | examples/extensions/ncvx/branch_and_bound.py | 12 | 3946 | import cvxopt
import cvxpy.problems.problem as problem
import cvxpy.settings as s
from boolean import Boolean
def branch(booleans):
bool_vals = (b for b in booleans if not b.fix_values)
# pick *a* boolean variable to branch on
# choose the most ambivalent one (smallest distance to 0.5)
# NOTE: if there are no boolean variables, will never branch
return min(bool_vals, key=lambda x: abs(x.value - 0.5))
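# Example: if the relaxed values of the unfixed booleans are 0.9, 0.45 and
# 0.2, branch() picks the variable at 0.45, since it is closest to 0.5 and
# therefore the least decided.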
def bound(prob, booleans):
# relax boolean constraints
for bool_var in booleans: bool_var.relax()
# solves relaxation
lower_bound = prob._solve()
if isinstance(lower_bound, str):
lower_bound = float('inf')
# round boolean variables and re-solve to obtain upper bound
for bool_var in booleans: bool_var.round()
upper_bound = prob._solve()
if isinstance(upper_bound, str):
upper_bound = float('inf')
return {'gap': upper_bound - lower_bound,
'ub': upper_bound,
'lb': lower_bound,
'obj': upper_bound,
            'sol': [b.value for b in booleans]}
def solve_wrapper(prob, i, booleans, depth, epsilon):
if i > depth: return None
# branch
branch_var = branch(booleans)
# try true branch
branch_var.set(True)
true_branch = bound(prob, booleans)
# try false branch
branch_var.set(False)
false_branch = bound(prob, booleans)
# keep track of best objective so far
if true_branch['obj'] < false_branch['obj']:
solution = true_branch
else:
solution = false_branch
# update the bound
solution['lb'] = min(true_branch['lb'],false_branch['lb'])
solution['ub'] = min(true_branch['ub'],false_branch['ub'])
# check if gap is small enough
solution['gap'] = solution['ub'] - solution['lb']
if solution['gap'] < epsilon:
branch_var.unset()
return solution
# if the gap isn't small enough, we will choose a branch to go down
def take_branch(true_or_false):
branch_var.set(true_or_false)
if true_or_false is True: branch_bools = true_branch['sol']
else: branch_bools = false_branch['sol']
# restore the values into the set of booleans
for b, value in zip(booleans,branch_bools):
b.save_value(value)
return solve_wrapper(prob, i+1, booleans, depth, epsilon)
# partition based on lower bounds
if true_branch['lb'] < false_branch['lb']:
true_subtree = take_branch(True)
false_subtree = take_branch(False)
else:
false_subtree = take_branch(False)
true_subtree = take_branch(True)
# propagate best solution up the tree
if true_subtree and false_subtree:
if true_subtree['obj'] < false_subtree['obj']:
return true_subtree
return false_subtree
if not false_subtree and true_subtree: return true_subtree
if not true_subtree and false_subtree: return false_subtree
# return best guess so far
return solution
def branch_and_bound(self, depth=5, epsilon=1e-3):
objective, constr_map = self.canonicalize()
dims = self._format_for_solver(constr_map, s.ECOS)
variables = self.objective.variables()
for constr in self.constraints:
variables += constr.variables()
booleans = [v for v in variables if isinstance(v, Boolean)]
self.constraints.extend(b._LB <= b for b in booleans)
self.constraints.extend(b <= b._UB for b in booleans)
result = bound(self, booleans)
# check if gap is small enough
if result['gap'] < epsilon:
return result['obj']
result = solve_wrapper(self, 0, booleans, depth, epsilon)
# set the boolean values to the solution
for b, value in zip(booleans, result['sol']):
b.save_value(value)
b.fix_values = cvxopt.matrix(True, b.size)
return result['obj']
# add branch and bound a solution method
problem.Problem.register_solve("branch and bound", branch_and_bound)
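# --- Usage sketch (illustrative, not part of the upstream module) ---
# With the method registered above, a problem built over Boolean variables
# could be solved roughly as follows; the atom names follow the pre-1.0
# cvxpy API and the data is made up for illustration.
#
#     from cvxpy import Minimize, Problem, square, sum_entries
#     x = Boolean(3)
#     prob = Problem(Minimize(sum_entries(square(x - 0.6))))
#     prob.solve(method="branch and bound", depth=5, epsilon=1e-3)
#     print(x.value)   # each entry is now fixed to 0 or 1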
| gpl-3.0 |