code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---|
import numpy as np
import os
def get_data_from_file_polarity(fname):
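# Each line is assumed to hold "<label> <token> <token> ..."; labels of -1/1 are mapped to 0/1 below.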
labels, sentences = [], []
with open(fname, 'rb') as f:
for line in f:
label, text = line.strip().split(' ', 1)
text = text.split(' ')
labels.append((int(label) + 1) / 2)
sentences.append(text)
labels = np.ravel(labels)
return sentences, labels
| henryre/shalo | utils/parse_data.py | Python | apache-2.0 | 386 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from openerp.tools.translate import quote, unquote, xml_translate
class TranslationToolsTestCase(unittest.TestCase):
def test_quote_unquote(self):
def test_string(str):
quoted = quote(str)
#print "\n1:", repr(str)
#print "2:", repr(quoted)
unquoted = unquote("".join(quoted.split('"\n"')))
#print "3:", repr(unquoted)
self.assertEquals(str, unquoted)
test_string("""test \nall kinds\n \n o\r
\\\\ nope\n\n"
""")
# The ones with 1+ backslashes directly followed by
# a newline or literal N can fail... we would need a
# state-machine parser to handle these, but this would
# be much slower so it's better to avoid them at the moment
self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
\\\\nope\n\n"
""")
def test_translate_xml_base(self):
""" Test xml_translate() without formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah blah blah</h1>
Put some more text here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah', 'Put some more text here'])
def test_translate_xml_inline1(self):
""" Test xml_translate() with formatting elements. """
terms = []
source = """<form string="Form stuff">
<h1>Blah <i>blah</i> blah</h1>
Put some <b>more text</b> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])
def test_translate_xml_inline2(self):
""" Test xml_translate() with formatting elements embedding other elements. """
terms = []
source = """<form string="Form stuff">
<b><h1>Blah <i>blah</i> blah</h1></b>
Put <em>some <b>more text</b></em> here
<field name="foo"/>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])
def test_translate_xml_inline3(self):
""" Test xml_translate() with formatting elements without actual text. """
terms = []
source = """<form string="Form stuff">
<div>
<span class="before"/>
<h1>Blah blah blah</h1>
<span class="after">
<i class="hack"/>
</span>
</div>
</form>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Form stuff', 'Blah blah blah'])
def test_translate_xml_t(self):
""" Test xml_translate() with t-* attributes. """
terms = []
source = """<t t-name="stuff">
stuff before
<span t-field="o.name"/>
stuff after
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_off(self):
""" Test xml_translate() with attribute translate="off". """
terms = []
source = """<div>
stuff before
<div translation="off">Do not translate this</div>
stuff after
</div>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['stuff before', 'stuff after'])
def test_translate_xml_attribute(self):
""" Test xml_translate() with <attribute> elements. """
terms = []
source = """<field name="foo" position="attributes">
<attribute name="string">Translate this</attribute>
<attribute name="option">Do not translate this</attribute>
</field>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['Translate this'])
def test_translate_xml_a(self):
""" Test xml_translate() with <a> elements. """
terms = []
source = """<t t-name="stuff">
<ul class="nav navbar-nav">
<li>
<a class="oe_menu_leaf" href="/web#menu_id=42&amp;action=54">
<span class="oe_menu_text">Blah</span>
</a>
</li>
<li class="dropdown" id="menu_more_container" style="display: none;">
<a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
<ul class="dropdown-menu" id="menu_more"/>
</li>
</ul>
</t>"""
result = xml_translate(terms.append, source)
self.assertEquals(result, source)
self.assertItemsEqual(terms,
['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])
| minhphung171093/GreenERP | openerp/addons/base/tests/test_translate.py | Python | gpl-3.0 | 6,065 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
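# Subclasses are expected to override run_test() and may override add_options(), setup_chain() and setup_network().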
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitdealds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitdealds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing bitdeald/bitdeal-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: bitdealds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LITECOIND", "bitdeald"),
help="bitdeald binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("LITECOIND", "bitdeald"),
help="bitdeald binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| bitdeal/bitdeal | qa/rpc-tests/test_framework/test_framework.py | Python | mit | 7,473 |
"""
WSGI config for roleplaying project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "roleplaying.settings")
application = get_wsgi_application()
| svamp/rp_management | roleplaying/wsgi.py | Python | gpl-3.0 | 399 |
from collections import namedtuple
import contextlib
import logging
import os
import math
import shutil
import subprocess
import tempfile
import ffmpeg
logging.basicConfig(
format="%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d:%H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
EncodingParameters = namedtuple(
"EncodingParameters", ["width", "height", "fps"]
)
def log_ffmpeg_error(e, action):
logger.info(f"Error (in action {action}):")
if e.stdout:
logger.info("stdout:")
logger.info(e.stdout.decode())
logger.info("======")
if e.stderr:
logger.error("stderr:")
logger.error(e.stderr.decode())
logger.error("======")
def save_file(tmp_folder, instance_id, file_to_save):
"""
Save the given Flask file at the given path. This function should only be
used for temporary storage.
"""
extension = file_to_save.filename[-4:]
file_name = instance_id + extension.lower() + ".tmp"
file_path = os.path.join(tmp_folder, file_name)
file_to_save.save(file_path)
return file_path
def generate_thumbnail(movie_path):
"""
Generate a thumbnail to represent the movie given at movie path. It
takes a picture at the first frame of the movie.
"""
folder_path = os.path.dirname(movie_path)
file_source_name = os.path.basename(movie_path)
file_target_name = "%s.png" % file_source_name[:-4]
file_target_path = os.path.join(folder_path, file_target_name)
try:
ffmpeg.input(movie_path, ss="00:00:00").output(
file_target_path, vframes=1
).run(quiet=True)
except ffmpeg._run.Error as e:
log_ffmpeg_error(e, "generate_thumbnail")
raise (e)
return file_target_path
def generate_tile(movie_path):
pass
def get_movie_size(movie_path):
"""
Returns the movie resolution (probes the video stream and returns its width and height).
"""
try:
probe = ffmpeg.probe(movie_path)
except ffmpeg._run.Error as e:
log_ffmpeg_error(e, "get_movie_size")
raise (e)
video = next(
(
stream
for stream in probe["streams"]
if stream["codec_type"] == "video"
),
None,
)
width = int(video["width"])
height = int(video["height"])
return (width, height)
def normalize_encoding(
movie_path, task, file_target_path, fps, b, width, height
):
logger.info(task)
stream = ffmpeg.input(movie_path)
stream = ffmpeg.output(
stream.video,
stream.audio,
file_target_path,
pix_fmt="yuv420p",
format="mp4",
r=fps,
b=b,
preset="slow",
vcodec="libx264",
color_primaries=1,
color_trc=1,
colorspace=1,
movflags="+faststart",
s="%sx%s" % (width, height),
)
try:
logger.info(f"ffmpeg {' '.join(stream.get_args())}")
stream.run(quiet=False, capture_stderr=True, overwrite_output=True)
except ffmpeg._run.Error as e:
log_ffmpeg_error(e, task)
raise (e)
def normalize_movie(movie_path, fps, width, height):
"""
Normalize the movie using the fps, width and height given as parameters.
Generates a high def movie and a low def movie.
"""
folder_path = os.path.dirname(movie_path)
file_source_name = os.path.basename(movie_path)
file_target_name = "%s.mp4" % file_source_name[:-8]
file_target_path = os.path.join(folder_path, file_target_name)
low_file_target_name = "%s_low.mp4" % file_source_name[:-8]
low_file_target_path = os.path.join(folder_path, low_file_target_name)
(w, h) = get_movie_size(movie_path)
resize_factor = w / h
if width is None:
width = math.floor(resize_factor * height)
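# libx264 with yuv420p needs even frame dimensions, so odd widths/heights are rounded up.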
if width % 2 == 1:
width = width + 1
if height % 2 == 1:
height = height + 1
err = None
if not has_soundtrack(movie_path):
error_code, _, err = add_empty_soundtrack(movie_path)
if error_code != 0:
return file_target_path, low_file_target_path, err
else:
err = None
# High def version
normalize_encoding(
movie_path,
"Compute high def version",
file_target_path,
fps,
"28M",
width,
height,
)
# Low def version
low_width = 1280
low_height = math.floor((height / width) * low_width)
if low_height % 2 == 1:
low_height = low_height + 1
normalize_encoding(
movie_path,
"Compute low def version",
low_file_target_path,
fps,
"1M",
low_width,
low_height,
)
return file_target_path, low_file_target_path, err
def add_empty_soundtrack(file_path, try_count=1):
extension = file_path.split(".")[-1]
if extension == "tmp":
extension = file_path.split(".")[-2]
tmp_file_path = file_path + "_empty_audio." + extension
with contextlib.suppress(FileNotFoundError):
os.remove(tmp_file_path)
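# Mux a silent audio track (anullsrc) with the original video: the video stream is copied, the audio encoded as AAC.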
args = [
"ffmpeg",
"-hide_banner",
"-f",
"lavfi",
"-i",
"anullsrc",
"-i",
file_path,
"-c:v",
"copy",
"-c:a",
"aac",
"-map",
"0:a",
"-map",
"1:v",
"-shortest",
tmp_file_path,
]
logger.info(f"Launch ffmpeg with args: {' '.join(args)}")
sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, error = sp.communicate()
err = None
if error:
err = "\n".join(str(error).split("\\n"))
logger.info(f"add_empty_soundtrack.sp.returncode: {sp.returncode}")
if sp.returncode == 0:
shutil.copyfile(tmp_file_path, file_path)
else:
logger.error(f"Err in soundtrack: {err}")
logger.error(f"Err code: {sp.returncode}")
if try_count <= 1:
(width, height) = get_movie_size(file_path)
if height % 2 == 1:
height = height + 1
stream = ffmpeg.input(file_path)
stream = ffmpeg.output(
stream.video,
tmp_file_path,
pix_fmt="yuv420p",
format="mp4",
preset="slow",
vcodec="libx264",
color_primaries=1,
color_trc=1,
colorspace=1,
movflags="+faststart",
s="%sx%s" % (width, height),
)
try:
logger.info(f"ffmpeg {' '.join(stream.get_args())}")
stream.run(
quiet=False, capture_stderr=True, overwrite_output=True
)
except ffmpeg._run.Error as e:
log_ffmpeg_error(
e,
"Try to convert video after first add_empty_soundtrack fail",
)
raise (e)
shutil.copyfile(tmp_file_path, file_path)
return add_empty_soundtrack(file_path, try_count=2)
return sp.returncode, out, err
def has_soundtrack(file_path):
try:
audio = ffmpeg.probe(file_path, select_streams="a")
except ffmpeg._run.Error as e:
log_ffmpeg_error(e, "has_soundtrack")
raise (e)
return len(audio["streams"]) > 0
def build_playlist_movie(
concat, tmp_file_paths, movie_file_path, width, height, fps
):
"""
Build a single movie file from a playlist.
"""
in_files = []
result = {"message": "", "success": False}
if len(tmp_file_paths) > 0:
# Get movie dimensions
(first_movie_file_path, _) = tmp_file_paths[0]
if width is None:
(width, height) = get_movie_size(first_movie_file_path)
# Clean empty audio tracks
for tmp_file_path, file_name in tmp_file_paths:
if not has_soundtrack(tmp_file_path):
ret, _, err = add_empty_soundtrack(tmp_file_path)
if err:
result["message"] += "%s\n" % err
if ret != 0:
return result
in_files.append(tmp_file_path)
# Run concatenation
concat_result = concat(in_files, movie_file_path, width, height, fps)
if concat_result.get("message"):
result["message"] += concat_result.get("message")
result["success"] = concat_result.get("success", True)
return result
def concat_demuxer(in_files, output_path, *args):
"""
Concatenate media files with exactly the same codec and codec
parameters. Different container formats can be used and it can be used
with any container formats.
"""
for input_path in in_files:
try:
info = ffmpeg.probe(input_path)
except ffmpeg._run.Error as e:
log_ffmpeg_error(e, "concat_demuxer")
raise (e)
streams = info["streams"]
if len(streams) != 2:
return {
"success": False,
"message": "%s has an unexpected stream number (%s)"
% (input_path, len(streams)),
}
stream_infos = {streams[0]["codec_type"], streams[1]["codec_type"]}
if stream_infos != {"video", "audio"}:
return {
"success": False,
"message": "%s has unexpected stream type (%s)"
% (
input_path,
{streams[0]["codec_type"], streams[1]["codec_type"]},
),
}
video_index = [
x["index"] for x in streams if x["codec_type"] == "video"
][0]
if video_index != 0:
return {
"success": False,
"message": "%s has an unexpected stream order" % input_path,
}
with tempfile.NamedTemporaryFile(mode="w") as temp:
for input_path in in_files:
temp.write("file '%s'\n" % input_path)
temp.flush()
stream = ffmpeg.input(temp.name, format="concat", safe=0)
stream = ffmpeg.output(
stream.video, stream.audio, output_path, c="copy"
)
return run_ffmpeg(stream, "-xerror")
def concat_filter(in_files, output_path, width, height, *args):
"""
Concatenate media files with different codecs or different codec
properties
"""
streams = []
for input_path in in_files:
in_file = ffmpeg.input(input_path)
streams.append(
in_file["v"].filter("setsar", "1/1").filter("scale", width, height)
)
streams.append(in_file["a"])
joined = ffmpeg.concat(*streams, v=1, a=1).node
video = joined[0]
audio = joined[1]
stream = ffmpeg.output(audio, video, output_path)
return run_ffmpeg(stream)
def run_ffmpeg(stream, *args):
"""
Run ffmpeg and handles the result by creating a dict containing a success
flag and a error message if success is set to False.
"""
result = {}
try:
stream.overwrite_output().run(cmd=("ffmpeg",) + args)
result["success"] = True
except ffmpeg._run.Error as e:
log_ffmpeg_error(e, "run_ffmpeg/ffmpeg._run.Error")
result["success"] = False
result["message"] = str(e)
except Exception as e:
logger.error(e)
result["success"] = False
result["message"] = str(e)
return result
| cgwire/zou | zou/utils/movie.py | Python | agpl-3.0 | 11,448 |
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
FAIL = "FAIL"
PASS = "PASS"
OKAY = "OKAY"
TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
FLAKY = "FLAKY"
NO_VARIANTS = "NO_VARIANTS"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
ALWAYS = "ALWAYS"
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FLAKY, FAIL_OK,
NO_VARIANTS, PASS_OR_FAIL, ALWAYS]:
KEYWORDS[key] = key
DEFS = {FAIL_OK: [FAIL, OKAY],
PASS_OR_FAIL: [PASS, FAIL]}
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
"arm", "arm64", "ia32", "mips", "mipsel", "x64", "x87", "nacl_ia32",
"nacl_x64", "macos", "windows", "linux"]:
VARIABLES[var] = var
def DoSkip(outcomes):
return SKIP in outcomes
def IsSlow(outcomes):
return SLOW in outcomes
def OnlyStandardVariant(outcomes):
return NO_VARIANTS in outcomes
def IsFlaky(outcomes):
return FLAKY in outcomes
def IsPassOrFail(outcomes):
return ((PASS in outcomes) and (FAIL in outcomes) and
(not CRASH in outcomes) and (not OKAY in outcomes))
def IsFailOk(outcomes):
return (FAIL in outcomes) and (OKAY in outcomes)
def _AddOutcome(result, new):
global DEFS
if new in DEFS:
mapped = DEFS[new]
if type(mapped) == list:
for m in mapped:
_AddOutcome(result, m)
elif type(mapped) == str:
_AddOutcome(result, mapped)
else:
result.add(new)
def _ParseOutcomeList(rule, outcomes, target_dict, variables):
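# `outcomes` may be a single keyword, or a list whose items are keywords or [condition, outcome, ...] lists evaluated against `variables`.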
result = set([])
if type(outcomes) == str:
outcomes = [outcomes]
for item in outcomes:
if type(item) == str:
_AddOutcome(result, item)
elif type(item) == list:
if not eval(item[0], variables): continue
for outcome in item[1:]:
assert type(outcome) == str
_AddOutcome(result, outcome)
else:
assert False
if len(result) == 0: return
if rule in target_dict:
target_dict[rule] |= result
else:
target_dict[rule] = result
def ReadStatusFile(path, variables):
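# A status file is a Python literal evaluated with the outcome KEYWORDS: a list of [condition, {rule: outcomes}] sections.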
with open(path) as f:
global KEYWORDS
contents = eval(f.read(), KEYWORDS)
rules = {}
wildcards = {}
variables.update(VARIABLES)
for section in contents:
assert type(section) == list
assert len(section) == 2
if not eval(section[0], variables): continue
section = section[1]
assert type(section) == dict
for rule in section:
assert type(rule) == str
if rule[-1] == '*':
_ParseOutcomeList(rule, section[rule], wildcards, variables)
else:
_ParseOutcomeList(rule, section[rule], rules, variables)
return rules, wildcards
| nextsmsversion/macchina.io | platform/JS/V8/v8-3.28.4/tools/testrunner/local/statusfile.py | Python | apache-2.0 | 4,397 |
import itertools
import subprocess
import sys
#http://pastebin.com/zj72xk4N
#run when system password box is showing eg. keychain password dialog
#apple script for automating dialog box input
sys_script = '''
tell application "System Events" to tell process "SecurityAgent"
set value of text field 1 of window 1 to $(PASS)
click button 1 of group 1 of window 1
end tell
'''
#fill this array with chars for combination
keys = ['s','t','a','r','t']
def automate_login():
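# Try every permutation of `keys` (all lengths) as the password, driving the SecurityAgent dialog via AppleScript.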
for l in xrange(0, len(keys)+1):
for subset in itertools.permutations(keys, l):
guess = ''.join(subset)
tmp = sys_script.replace('$(PASS)', '"%s"' % guess)
try:
subprocess.check_output('osascript -e \'%s\'' % tmp, shell=True)
sys.stdout.write('\rtrying %s ' % guess)
sys.stdout.flush()
except subprocess.CalledProcessError:
print('\nfailed')
return
return
automate_login() | Jacobious52/PythonLab | osxpasscrack.py | Python | gpl-2.0 | 886 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import autoslug.fields
class Migration(migrations.Migration):
dependencies = [
('characters', '0005_auto_20150408_0028'),
]
operations = [
migrations.AlterField(
model_name='character',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False),
),
]
| Tengro/Redlands | RedPerm/characters/migrations/0006_auto_20150410_1408.py | Python | bsd-2-clause | 443 |
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
cyl0 = chigger.geometric.LineSource(point1=[0,0,0], point2=[0,1,0], data=[1, 2, 4, 8, 16], cmap='viridis')
cyls = chigger.base.ChiggerResult(cyl0)
window = chigger.RenderWindow(cyls, size=[300,300], test=True)
window.write('line_source_data.png')
window.start()
| nuclear-wizard/moose | python/chigger/tests/geometric/line_source/line_source_data.py | Python | lgpl-2.1 | 645 |
# Authors:
# Petr Viktorin <[email protected]>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Common tasks for FreeIPA integration tests"""
import os
import textwrap
import re
import collections
import itertools
import time
import StringIO
from ldif import LDIFWriter
from ipapython import ipautil
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipapython.ipa_log_manager import log_mgr
from ipatests.test_integration import util
from ipatests.test_integration.env_config import env_to_script
from ipatests.test_integration.host import Host
log = log_mgr.get_logger(__name__)
def prepare_host(host):
if isinstance(host, Host):
env_filename = os.path.join(host.config.test_dir, 'env.sh')
# First we try to run simple echo command to test the connection
host.run_command(['true'], set_env=False)
host.collect_log(env_filename)
host.transport.mkdir_recursive(host.config.test_dir)
host.put_file_contents(env_filename, env_to_script(host.to_env()))
def apply_common_fixes(host):
fix_etc_hosts(host)
fix_hostname(host)
fix_resolv_conf(host)
def backup_file(host, filename):
if host.transport.file_exists(filename):
backupname = os.path.join(host.config.test_dir, 'file_backup',
filename.lstrip('/'))
host.transport.mkdir_recursive(os.path.dirname(backupname))
host.run_command(['cp', '-af', filename, backupname])
return True
else:
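# The file does not exist yet: record its name so restore_files() can delete it when fixes are unapplied.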
rmname = os.path.join(host.config.test_dir, 'file_remove')
host.run_command('echo %s >> %s' % (
ipautil.shell_quote(filename),
ipautil.shell_quote(rmname)))
contents = host.get_file_contents(rmname)
host.transport.mkdir_recursive(os.path.dirname(rmname))
return False
def fix_etc_hosts(host):
backup_file(host, paths.HOSTS)
contents = host.get_file_contents(paths.HOSTS)
# Remove existing mentions of the host's FQDN, short name, and IP
contents = re.sub('\s%s(\s|$)' % re.escape(host.hostname), ' ', contents,
flags=re.MULTILINE)
contents = re.sub('\s%s(\s|$)' % re.escape(host.shortname), ' ', contents,
flags=re.MULTILINE)
contents = re.sub('^%s.*' % re.escape(host.ip), '', contents,
flags=re.MULTILINE)
# Add the host's info again
contents += '\n%s %s %s\n' % (host.ip, host.hostname, host.shortname)
log.debug('Writing the following to /etc/hosts:\n%s', contents)
host.put_file_contents(paths.HOSTS, contents)
def fix_hostname(host):
backup_file(host, paths.ETC_HOSTNAME)
host.put_file_contents(paths.ETC_HOSTNAME, host.hostname + '\n')
host.run_command(['hostname', host.hostname])
backupname = os.path.join(host.config.test_dir, 'backup_hostname')
host.run_command('hostname > %s' % ipautil.shell_quote(backupname))
def fix_resolv_conf(host):
backup_file(host, paths.RESOLV_CONF)
lines = host.get_file_contents(paths.RESOLV_CONF).splitlines()
lines = ['#' + l if l.startswith('nameserver') else l for l in lines]
for other_host in host.domain.hosts:
if other_host.role in ('master', 'replica'):
lines.append('nameserver %s' % other_host.ip)
contents = '\n'.join(lines)
log.debug('Writing the following to /etc/resolv.conf:\n%s', contents)
host.put_file_contents(paths.RESOLV_CONF, contents)
def fix_apache_semaphores(master):
systemd_available = master.transport.file_exists(paths.SYSTEMCTL)
if systemd_available:
master.run_command(['systemctl', 'stop', 'httpd'], raiseonerr=False)
else:
master.run_command([paths.SBIN_SERVICE, 'httpd', 'stop'], raiseonerr=False)
master.run_command('for line in `ipcs -s | grep apache | cut -d " " -f 2`; '
'do ipcrm -s $line; done', raiseonerr=False)
def unapply_fixes(host):
restore_files(host)
restore_hostname(host)
# Clean up the test directory
host.run_command(['rm', '-rvf', host.config.test_dir])
def restore_files(host):
backupname = os.path.join(host.config.test_dir, 'file_backup')
rmname = os.path.join(host.config.test_dir, 'file_remove')
# Prepare command for restoring context of the backed-up files
sed_remove_backupdir = 's/%s//g' % backupname.replace('/', '\/')
restorecon_command = (
"find %s | "
"sed '%s' | "
"sed '/^$/d' | "
"xargs -d '\n' "
"/sbin/restorecon -v" % (backupname, sed_remove_backupdir))
# Prepare command for actual restoring of the backed up files
copyfiles_command = 'if [ -d %(dir)s/ ]; then cp -arvf %(dir)s/* /; fi' % {
'dir': ipautil.shell_quote(backupname)}
# Run both commands in one session. For more information, see:
# https://fedorahosted.org/freeipa/ticket/4133
host.run_command('%s ; (%s ||:)' % (copyfiles_command, restorecon_command))
# Remove all the files that did not exist and were 'backed up'
host.run_command(['xargs', '-d', r'\n', '-a', rmname, 'rm', '-vf'],
raiseonerr=False)
host.run_command(['rm', '-rvf', backupname, rmname], raiseonerr=False)
def restore_hostname(host):
backupname = os.path.join(host.config.test_dir, 'backup_hostname')
try:
hostname = host.get_file_contents(backupname)
except IOError:
log.debug('No hostname backed up on %s' % host.hostname)
else:
host.run_command(['hostname', hostname.strip()])
host.run_command(['rm', backupname])
def enable_replication_debugging(host):
log.info('Enable LDAP replication logging')
logging_ldif = textwrap.dedent("""
dn: cn=config
changetype: modify
replace: nsslapd-errorlog-level
nsslapd-errorlog-level: 8192
""")
host.run_command(['ldapmodify', '-x',
'-D', str(host.config.dirman_dn),
'-w', host.config.dirman_password],
stdin_text=logging_ldif)
def install_master(host, setup_dns=True):
host.collect_log(paths.IPASERVER_INSTALL_LOG)
host.collect_log(paths.IPACLIENT_INSTALL_LOG)
inst = host.domain.realm.replace('.', '-')
host.collect_log(paths.SLAPD_INSTANCE_ERROR_LOG_TEMPLATE % inst)
host.collect_log(paths.SLAPD_INSTANCE_ACCESS_LOG_TEMPLATE % inst)
apply_common_fixes(host)
fix_apache_semaphores(host)
args = [
'ipa-server-install', '-U',
'-r', host.domain.name,
'-p', host.config.dirman_password,
'-a', host.config.admin_password
]
if setup_dns:
args.extend([
'--setup-dns',
'--forwarder', host.config.dns_forwarder
])
host.run_command(args)
enable_replication_debugging(host)
setup_sssd_debugging(host)
kinit_admin(host)
def install_replica(master, replica, setup_ca=True, setup_dns=False):
replica.collect_log(paths.IPAREPLICA_INSTALL_LOG)
replica.collect_log(paths.IPAREPLICA_CONNCHECK_LOG)
apply_common_fixes(replica)
fix_apache_semaphores(replica)
master.run_command(['ipa-replica-prepare',
'-p', replica.config.dirman_password,
'--ip-address', replica.ip,
replica.hostname])
replica_bundle = master.get_file_contents(
paths.REPLICA_INFO_GPG_TEMPLATE % replica.hostname)
replica_filename = os.path.join(replica.config.test_dir,
'replica-info.gpg')
replica.put_file_contents(replica_filename, replica_bundle)
args = ['ipa-replica-install', '-U',
'-p', replica.config.dirman_password,
'-w', replica.config.admin_password,
'--ip-address', replica.ip,
replica_filename]
if setup_ca:
args.append('--setup-ca')
if setup_dns:
args.extend([
'--setup-dns',
'--forwarder', replica.config.dns_forwarder
])
replica.run_command(args)
enable_replication_debugging(replica)
setup_sssd_debugging(replica)
kinit_admin(replica)
def install_client(master, client, extra_args=()):
client.collect_log(paths.IPACLIENT_INSTALL_LOG)
apply_common_fixes(client)
client.run_command(['ipa-client-install', '-U',
'--domain', client.domain.name,
'--realm', client.domain.realm,
'-p', client.config.admin_name,
'-w', client.config.admin_password,
'--server', master.hostname]
+ list(extra_args))
setup_sssd_debugging(client)
kinit_admin(client)
def install_adtrust(host):
"""
Runs ipa-adtrust-install on the client and generates SIDs for the entries.
Configures the compat tree for the legacy clients.
"""
# ipa-adtrust-install appends to ipaserver-install.log
host.collect_log(paths.IPASERVER_INSTALL_LOG)
inst = host.domain.realm.replace('.', '-')
host.collect_log(paths.SLAPD_INSTANCE_ERROR_LOG_TEMPLATE % inst)
host.collect_log(paths.SLAPD_INSTANCE_ACCESS_LOG_TEMPLATE % inst)
kinit_admin(host)
host.run_command(['ipa-adtrust-install', '-U',
'--enable-compat',
'--netbios-name', host.netbios,
'-a', host.config.admin_password,
'--add-sids'])
# Restart named because it lost connection to dirsrv
# (Directory server restarts during the ipa-adtrust-install)
# we use two services named and named-pkcs11,
# if named is masked restart named-pkcs11
result = host.run_command(['systemctl', 'is-enabled', 'named'],
raiseonerr=False)
if result.stdout_text.startswith("masked"):
host.run_command(['systemctl', 'restart', 'named-pkcs11'])
else:
host.run_command(['systemctl', 'restart', 'named'])
# Check that named is running and has loaded the information from LDAP
dig_command = ['dig', 'SRV', '+short', '@localhost',
'_ldap._tcp.%s' % host.domain.name]
dig_output = '0 100 389 %s.' % host.hostname
dig_test = lambda x: re.search(re.escape(dig_output), x)
util.run_repeatedly(host, dig_command, test=dig_test)
def configure_dns_for_trust(master, ad):
"""
This configures DNS on IPA master according to the relationship of the
IPA's and AD's domains.
"""
def is_subdomain(subdomain, domain):
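# True when `subdomain` has more labels than `domain` and the common trailing labels all match.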
subdomain_unpacked = subdomain.split('.')
domain_unpacked = domain.split('.')
subdomain_unpacked.reverse()
domain_unpacked.reverse()
subdomain = False
if len(subdomain_unpacked) > len(domain_unpacked):
subdomain = True
for subdomain_segment, domain_segment in zip(subdomain_unpacked,
domain_unpacked):
subdomain = subdomain and subdomain_segment == domain_segment
return subdomain
kinit_admin(master)
if is_subdomain(ad.domain.name, master.domain.name):
master.run_command(['ipa', 'dnsrecord-add', master.domain.name,
'%s.%s' % (ad.shortname, ad.netbios),
'--a-ip-address', ad.ip])
master.run_command(['ipa', 'dnsrecord-add', master.domain.name,
ad.netbios,
'--ns-hostname',
'%s.%s' % (ad.shortname, ad.netbios)])
master.run_command(['ipa', 'dnszone-mod', master.domain.name,
'--allow-transfer', ad.ip])
else:
master.run_command(['ipa', 'dnsforwardzone-add', ad.domain.name,
'--forwarder', ad.ip,
'--forward-policy', 'only',
])
def establish_trust_with_ad(master, ad, extra_args=()):
"""
Establishes trust with Active Directory. Trust type is detected depending
on the presence of SfU (Services for Unix) support on the AD.
Use extra arguments to pass extra arguments to the trust-add command, such
as --range-type="ipa-ad-trust" to enforce a particular range type.
"""
# Force KDC to reload MS-PAC info by trying to get TGT for HTTP
master.run_command(['kinit', '-kt', paths.IPA_KEYTAB,
'HTTP/%s' % master.hostname])
master.run_command(['systemctl', 'restart', 'krb5kdc.service'])
master.run_command(['kdestroy', '-A'])
kinit_admin(master)
master.run_command(['klist'])
master.run_command(['smbcontrol', 'all', 'debug', '100'])
util.run_repeatedly(master,
['ipa', 'trust-add',
'--type', 'ad', ad.domain.name,
'--admin', 'Administrator',
'--password'] + list(extra_args),
stdin_text=master.config.ad_admin_password)
master.run_command(['smbcontrol', 'all', 'debug', '1'])
clear_sssd_cache(master)
def remove_trust_with_ad(master, ad):
"""
Removes trust with Active Directory. Also removes the associated ID range.
"""
kinit_admin(master)
# Remove the trust
master.run_command(['ipa', 'trust-del', ad.domain.name])
# Remove the range
range_name = ad.domain.name.upper() + '_id_range'
master.run_command(['ipa', 'idrange-del', range_name])
def configure_auth_to_local_rule(master, ad):
"""
Configures auth_to_local rule in /etc/krb5.conf
"""
section_identifier = " %s = {" % master.domain.realm
line1 = (" auth_to_local = RULE:[1:$1@$0](^.*@%s$)s/@%s/@%s/"
% (ad.domain.realm, ad.domain.realm, ad.domain.name))
line2 = " auth_to_local = DEFAULT"
krb5_conf_content = master.get_file_contents(paths.KRB5_CONF)
krb5_lines = [line.rstrip() for line in krb5_conf_content.split('\n')]
realm_section_index = krb5_lines.index(section_identifier)
krb5_lines.insert(realm_section_index + 1, line1)
krb5_lines.insert(realm_section_index + 2, line2)
krb5_conf_new_content = '\n'.join(krb5_lines)
master.put_file_contents(paths.KRB5_CONF, krb5_conf_new_content)
master.run_command(['systemctl', 'restart', 'sssd'])
def setup_sssd_debugging(host):
"""
Sets debug level to 7 in each section of sssd.conf file.
"""
# Set debug level in each section of sssd.conf file to 7
# First, remove any previous occurrences
host.run_command(['sed', '-i',
'/debug_level = 7/d',
paths.SSSD_CONF
], raiseonerr=False)
# Add the debug directive to each section
host.run_command(['sed', '-i',
'/\[*\]/ a\debug_level = 7',
paths.SSSD_CONF
], raiseonerr=False)
host.collect_log('/var/log/sssd/*')
# Clear the cache and restart SSSD
clear_sssd_cache(host)
def clear_sssd_cache(host):
"""
Clears SSSD cache by removing the cache files. Restarts SSSD.
"""
systemd_available = host.transport.file_exists(paths.SYSTEMCTL)
if systemd_available:
host.run_command(['systemctl', 'stop', 'sssd'])
else:
host.run_command([paths.SBIN_SERVICE, 'sssd', 'stop'])
host.run_command("find /var/lib/sss/db -name '*.ldb' | "
"xargs rm -fv")
host.run_command(['rm', '-fv', paths.SSSD_MC_GROUP])
host.run_command(['rm', '-fv', paths.SSSD_MC_PASSWD])
if systemd_available:
host.run_command(['systemctl', 'start', 'sssd'])
else:
host.run_command([paths.SBIN_SERVICE, 'sssd', 'start'])
# To avoid false negatives due to SSSD not responding yet
time.sleep(10)
def sync_time(host, server):
"""
Syncs the time with the remote server. Please note that this function
leaves ntpd stopped.
"""
host.run_command(['systemctl', 'stop', 'ntpd'])
host.run_command(['ntpdate', server.hostname])
def connect_replica(master, replica):
kinit_admin(replica)
replica.run_command(['ipa-replica-manage', 'connect', master.hostname])
def disconnect_replica(master, replica):
kinit_admin(replica)
replica.run_command(['ipa-replica-manage', 'disconnect', master.hostname])
def kinit_admin(host):
host.run_command(['kinit', 'admin'],
stdin_text=host.config.admin_password)
def uninstall_master(host):
host.collect_log(paths.IPASERVER_UNINSTALL_LOG)
host.run_command(['ipa-server-install', '--uninstall', '-U'],
raiseonerr=False)
host.run_command(['pkidestroy', '-s', 'CA', '-i', 'pki-tomcat'],
raiseonerr=False)
host.run_command(['rm', '-rf',
paths.TOMCAT_TOPLEVEL_DIR,
paths.SYSCONFIG_PKI_TOMCAT,
paths.SYSCONFIG_PKI_TOMCAT_PKI_TOMCAT_DIR,
paths.VAR_LIB_PKI_TOMCAT_DIR,
paths.PKI_TOMCAT],
raiseonerr=False)
unapply_fixes(host)
def uninstall_client(host):
host.collect_log(paths.IPACLIENT_UNINSTALL_LOG)
host.run_command(['ipa-client-install', '--uninstall', '-U'],
raiseonerr=False)
unapply_fixes(host)
def get_topo(name_or_func):
"""Get a topology function by name
A topology function receives a master and list of replicas, and yields
(parent, child) pairs, where "child" should be installed from "parent"
(or just connected if already installed)
If a callable is given instead of name, it is returned directly
"""
if callable(name_or_func):
return name_or_func
return topologies[name_or_func]
def _topo(name):
"""Decorator that registers a function in topologies under a given name"""
def add_topo(func):
topologies[name] = func
return func
return add_topo
topologies = collections.OrderedDict()
@_topo('star')
def star_topo(master, replicas):
r"""All replicas are connected to the master
 Rn  R1  R2
   \  |  /
R7-- M -- R3
   /  |  \
 R6  R5  R4
"""
for replica in replicas:
yield master, replica
@_topo('line')
def line_topo(master, replicas):
r"""Line topology
M
 \
  R1
   \
    R2
     \
      R3
       \
        ...
"""
for replica in replicas:
yield master, replica
master = replica
@_topo('complete')
def complete_topo(master, replicas):
r"""Each host connected to each other host
M--R1
|\/|
|/\|
R2-R3
"""
for replica in replicas:
yield master, replica
for replica1, replica2 in itertools.combinations(replicas, 2):
yield replica1, replica2
@_topo('tree')
def tree_topo(master, replicas):
r"""Binary tree topology
        M
       / \
      /   \
    R1     R2
   /  \   /  \
  R3  R4 R5  R6
  /
 R7 ...
"""
replicas = list(replicas)
def _masters():
for host in [master] + replicas:
yield host
yield host
for parent, child in zip(_masters(), replicas):
yield parent, child
@_topo('tree2')
def tree2_topo(master, replicas):
r"""First replica connected directly to master, the rest in a line
  M
 / \
R1  R2
     \
      R3
       \
        R4
         \
          ...
"""
if replicas:
yield master, replicas[0]
for replica in replicas[1:]:
yield master, replica
master = replica
def install_topo(topo, master, replicas, clients,
skip_master=False, setup_replica_cas=True):
"""Install IPA servers and clients in the given topology"""
replicas = list(replicas)
installed = {master}
if not skip_master:
install_master(master)
add_a_records_for_hosts_in_master_domain(master)
for parent, child in get_topo(topo)(master, replicas):
if child in installed:
log.info('Connecting replica %s to %s' % (parent, child))
connect_replica(parent, child)
else:
log.info('Installing replica %s from %s' % (parent, child))
install_replica(parent, child, setup_ca=setup_replica_cas)
installed.add(child)
install_clients([master] + replicas, clients)
def install_clients(servers, clients):
"""Install IPA clients, distributing them among the given servers"""
for server, client in itertools.izip(itertools.cycle(servers), clients):
log.info('Installing client %s on %s' % (server, client))
install_client(server, client)
def _entries_to_ldif(entries):
"""Format LDAP entries as LDIF"""
lines = []
io = StringIO.StringIO()
writer = LDIFWriter(io)
for entry in entries:
writer.unparse(str(entry.dn), dict(entry))
return io.getvalue()
def wait_for_replication(ldap, timeout=30):
"""Wait until updates on all replication agreements are done (or failed)
:param ldap: LDAP client
authenticated with necessary rights to read the mapping tree
:param timeout: Maximum time to wait, in seconds
Note that this waits for updates originating on this host, not those
coming from other hosts.
"""
log.debug('Waiting for replication to finish')
for i in range(timeout):
time.sleep(1)
status_attr = 'nsds5replicaLastUpdateStatus'
progress_attr = 'nsds5replicaUpdateInProgress'
entries = ldap.get_entries(
DN(('cn', 'mapping tree'), ('cn', 'config')),
filter='(objectclass=nsds5replicationagreement)',
attrs_list=[status_attr, progress_attr])
log.debug('Replication agreements: \n%s', _entries_to_ldif(entries))
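# A last-update status that does not start with "0 " means the last replication update failed.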
if any(not e.single_value[status_attr].startswith('0 ')
for e in entries):
log.error('Replication error')
continue
if any(e.single_value[progress_attr] == 'TRUE' for e in entries):
log.debug('Replication in progress (waited %s/%ss)',
i, timeout)
else:
log.debug('Replication finished')
break
else:
log.error('Giving up wait for replication to finish')
def add_a_records_for_hosts_in_master_domain(master):
for host in master.domain.hosts:
# We don't need to take care of the zone creation since it is master
# domain
add_a_record(master, host)
def add_a_record(master, host):
# Find out if the record is already there
cmd = master.run_command(['ipa',
'dnsrecord-find',
master.domain.name,
host.hostname,
'--a-rec', host.ip],
raiseonerr=False)
# If not, add it
if cmd.returncode != 0:
master.run_command(['ipa',
'dnsrecord-add',
master.domain.name,
host.hostname,
'--a-rec', host.ip])
| hroncok/freeipa | ipatests/test_integration/tasks.py | Python | gpl-3.0 | 23,938 |
# lxnstack is a program to align and stack atronomical images
# Copyright (C) 2013-2015 Maurizio D'Addona <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from xml.dom import minidom
import numpy as np
import log
import utils
import imgfeatures
import lightcurves as lcurves
def getProjectAbsURL(project_dir, url):
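# Return absolute URLs unchanged; resolve relative ones against the project directory.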
if os.path.isabs(url):
return url
else:
abs_url = os.path.join(project_dir, url)
return os.path.realpath(abs_url)
def saveTransfTableToFile(fname, dic):
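# `dic` is expected to map (band1, band2) tuples to (coefficient, error) tuples.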
doc = minidom.Document()
root = doc.createElement('color-transformation-table')
doc.appendChild(root)
for key in dic:
try:
band1 = key[0]
band2 = key[1]
except:
raise TypeError("Not a color transformation coefficient table!")
else:
val = dic[key]
try:
coeff = val[0]
err = val[1]
except:
raise TypeError("Not a color transformation coefficient table!")
else:
node = doc.createElement('transformation-coefficient')
node.setAttribute('band1', str(band1))
node.setAttribute('band2', str(band2))
node.setAttribute('value', str(coeff))
node.setAttribute('error', str(err))
root.appendChild(node)
del node
try:
f = open(fname, 'w')
f.write(doc.toprettyxml(' ', '\n'))
f.close()
except IOError as err:
log.log("<lxnstack.projects module>",
"Cannot save the color transformation table: " + str(err),
level=logging.ERROR)
raise err
def loadTransfTableFromFile(fname):
try:
dom = minidom.parse(fname)
except Exception as err:
log.log("<lxnstack.projects module>",
"failed to parse project, xml formatting error: '{}'".format(
str(err)),
level=logging.ERROR)
raise(err)
root = dom.getElementsByTagName('color-transformation-table')[0]
dic = {}
for node in root.getElementsByTagName('transformation-coefficient'):
b1 = node.getAttribute('band1')
b2 = node.getAttribute('band2')
val = np.float64(node.getAttribute('value'))
err = np.float64(node.getAttribute('error'))
dic[(b1, b2)] = (val, err)
return dic
class Project(object):
def __init__(self, frame_open_args):
self.frame_open_args = frame_open_args
self.bias_frames = []
self.dark_frames = []
self.flat_frames = []
self.light_frames = []
self.master_bias_url = ""
self.master_dark_url = ""
self.master_flat_url = ""
self.master_bias_mul = 1
self.master_dark_mul = 1
self.master_flat_mul = 1
self.use_master_bias = False
self.use_master_dark = False
self.use_master_flat = False
self.imw = -1
self.imh = -1
self.dep = -1
self.current_image_idx = -1
self.aap_rectangle = (-1, -1)
self.max_points = -1
self.min_quality = -1
self.use_whole_image = -1
self.use_image_time = False
self.project_directory = ""
self.channel_mapping = {}
def loadProject(self, project_fname=None):
if not project_fname.strip():
log.log(repr(self),
' no project selected, reverting to previous state',
level=logging.INFO)
return None
else:
log.log(repr(self),
' project name: \''+str(project_fname)+'\'',
level=logging.DEBUG)
proj_path = os.path.dirname(project_fname)
try:
dom = minidom.parse(project_fname)
except Exception as err:
log.log(repr(self),
'failed to parse project, xml formatting error',
level=logging.ERROR)
raise(err)
try:
root = dom.getElementsByTagName('project')[0]
information_node = root.getElementsByTagName('information')[0]
dark_frames_node = root.getElementsByTagName('dark-frames')[0]
flat_frames_node = root.getElementsByTagName('flat-frames')[0]
pict_frames_node = root.getElementsByTagName('frames')[0]
try: # backward compatibility
bs_node_list = root.getElementsByTagName('bias-frames')
bias_frames_node = bs_node_list[0]
mbs_list = information_node.getElementsByTagName('master-bias')
master_bias_node = mbs_list[0]
mbs_ckd = master_bias_node.getAttribute('checked')
master_bias_checked = int(mbs_ckd)
mbs_mul = master_bias_node.getAttribute('mul')
master_bias_mul = float(mbs_mul)
has_bias_section = True
except Exception as exc:
log.log(repr(self),
'No bias section',
level=logging.DEBUG)
master_bias_node = None
has_bias_section = False
try: # backward compatibility
photometry_list = root.getElementsByTagName('photometry')
photometry_node = photometry_list[0]
has_photometry_section = True
except Exception as exc:
log.log(repr(self),
'no photometric section, skipping star loading',
level=logging.DEBUG)
has_photometry_section = False
log.log(repr(self),
'loading project information',
level=logging.DEBUG)
cdir_lst = information_node.getElementsByTagName('current-dir')
crow_lst = information_node.getElementsByTagName('current-row')
mdrk_lst = information_node.getElementsByTagName('master-dark')
mflt_lst = information_node.getElementsByTagName('master-flat')
arct_lst = information_node.getElementsByTagName('align-rect')
mp_lst = information_node.getElementsByTagName('max-align-points')
mq_lst = information_node.getElementsByTagName('min-point-quality')
current_dir_node = cdir_lst[0]
current_row_node = crow_lst[0]
master_dark_node = mdrk_lst[0]
master_flat_node = mflt_lst[0]
align_rect_node = arct_lst[0]
max_points_node = mp_lst[0]
min_quality_node = mq_lst[0]
imw = int(information_node.getAttribute('width'))
imh = int(information_node.getAttribute('height'))
dep = int(information_node.getAttribute('mode'))
try:
bayer_mode = int(information_node.getAttribute('bayer-mode'))
except:
bayer_mode = -1
ar_w = int(align_rect_node.getAttribute('width'))
ar_h = int(align_rect_node.getAttribute('height'))
use_whole_image = int(align_rect_node.getAttribute('whole-image'))
max_points = int(max_points_node.getAttribute('value'))
min_quality = float(min_quality_node.getAttribute('value'))
current_dir = current_dir_node.getAttribute('url')
current_row = int(current_row_node.getAttribute('index'))
master_dark_checked = int(master_dark_node.getAttribute('checked'))
master_flat_checked = int(master_flat_node.getAttribute('checked'))
master_dark_mul = float(master_dark_node.getAttribute('mul'))
master_flat_mul = float(master_flat_node.getAttribute('mul'))
try:
url_node = master_bias_node.getElementsByTagName('url')[0]
node_url = url_node.childNodes[0].data
master_bias_url = getProjectAbsURL(proj_path, node_url)
except:
master_bias_url = ''
try:
url_node = master_dark_node.getElementsByTagName('url')[0]
node_url = url_node.childNodes[0].data
master_dark_url = getProjectAbsURL(proj_path, node_url)
except:
master_dark_url = ''
try:
url_node = master_flat_node.getElementsByTagName('url')[0]
node_url = url_node.childNodes[0].data
master_flat_url = getProjectAbsURL(proj_path, node_url)
except:
master_flat_url = ''
biasframelist = []
if has_bias_section:
log.log(repr(self),
'reading bias-frames section',
level=logging.DEBUG)
for node in bias_frames_node.getElementsByTagName('image'):
im_bias_name = node.getAttribute('name')
try:
im_bias_used = int(node.getAttribute('used'))
except Exception as exc:
try:
st_im_used = str(node.getAttribute('used')).lower()
if st_im_used.lower() == 'false':
im_bias_used = 0
elif st_im_used.lower() == 'true':
im_bias_used = 2
else:
raise exc
except:
im_bias_used = 2
url_bias_node = node.getElementsByTagName('url')[0]
_bias_url = url_bias_node.childNodes[0].data
im_bias_url = getProjectAbsURL(proj_path, _bias_url)
if 'page' in url_bias_node.attributes.keys():
im_bias_page = url_bias_node.getAttribute('page')
biasfrm = utils.Frame(im_bias_url,
int(im_bias_page),
skip_loading=False,
**self.frame_open_args)
else:
biasfrm = utils.Frame(im_bias_url,
0,
skip_loading=False,
**self.frame_open_args)
biasfrm.tool_name = im_bias_name
biasfrm.width = imw
biasfrm.height = imh
biasfrm.mode = dep
biasfrm.setUsed(im_bias_used)
biasfrm.addProperty('frametype', utils.BIAS_FRAME_TYPE)
biasframelist.append(biasfrm)
log.log(repr(self),
'reading dark-frames section',
level=logging.DEBUG)
darkframelist = []
for node in dark_frames_node.getElementsByTagName('image'):
im_dark_name = node.getAttribute('name')
try:
im_dark_used = int(node.getAttribute('used'))
except Exception as exc:
try:
st_im_used = str(node.getAttribute('used')).lower()
if st_im_used.lower() == 'false':
im_dark_used = 0
elif st_im_used.lower() == 'true':
im_dark_used = 2
else:
raise exc
except:
im_dark_used = 2
url_dark_node = node.getElementsByTagName('url')[0]
_dark_url = url_dark_node.childNodes[0].data
im_dark_url = getProjectAbsURL(proj_path, _dark_url)
if 'page' in url_dark_node.attributes.keys():
im_dark_page = url_dark_node.getAttribute('page')
darkfrm = utils.Frame(im_dark_url,
int(im_dark_page),
skip_loading=False,
**self.frame_open_args)
else:
darkfrm = utils.Frame(im_dark_url,
0,
skip_loading=False,
**self.frame_open_args)
darkfrm.tool_name = im_dark_name
darkfrm.width = imw
darkfrm.height = imh
darkfrm.mode = dep
darkfrm.setUsed(im_dark_used)
darkfrm.addProperty('frametype', utils.DARK_FRAME_TYPE)
darkframelist.append(darkfrm)
log.log(repr(self),
'reading flatfield-frames section',
level=logging.DEBUG)
flatframelist = []
for node in flat_frames_node.getElementsByTagName('image'):
im_flat_name = node.getAttribute('name')
try:
im_flat_used = int(node.getAttribute('used'))
except Exception as exc:
try:
st_im_used = str(node.getAttribute('used')).lower()
if st_im_used.lower() == 'false':
im_flat_used = 0
elif st_im_used.lower() == 'true':
im_flat_used = 2
else:
raise exc
except:
im_flat_used = 2
url_flat_node = node.getElementsByTagName('url')[0]
_flat_url = url_flat_node.childNodes[0].data
im_flat_url = getProjectAbsURL(proj_path, _flat_url)
if 'page' in url_flat_node.attributes.keys():
im_flat_page = url_flat_node.getAttribute('page')
flatfrm = utils.Frame(im_flat_url,
int(im_flat_page),
skip_loading=False,
**self.frame_open_args)
else:
flatfrm = utils.Frame(im_flat_url,
0,
skip_loading=False,
**self.frame_open_args)
flatfrm.tool_name = im_flat_name
flatfrm.width = imw
flatfrm.height = imh
flatfrm.mode = dep
flatfrm.setUsed(im_flat_used)
flatfrm.addProperty('frametype', utils.FLAT_FRAME_TYPE)
flatframelist.append(flatfrm)
log.log(repr(self),
'reading light-frames section',
level=logging.DEBUG)
framelist = []
for node in pict_frames_node.getElementsByTagName('image'):
im_name = node.getAttribute('name')
try:
im_used = int(node.getAttribute('used'))
except Exception as exc:
st_im_used = str(node.getAttribute('used')).lower()
if st_im_used.lower() == 'false':
im_used = 0
elif st_im_used.lower() == 'true':
im_used = 2
else:
raise exc
im_url_node = node.getElementsByTagName('url')[0]
_url = im_url_node.childNodes[0].data
im_url = getProjectAbsURL(proj_path, _url)
if 'page' in im_url_node.attributes.keys():
im_page = im_url_node.getAttribute('page')
frm = utils.Frame(im_url,
int(im_page),
skip_loading=True,
**self.frame_open_args)
else:
frm = utils.Frame(im_url,
0,
skip_loading=True,
**self.frame_open_args)
for point in node.getElementsByTagName('align-point'):
point_id = point.getAttribute('id')
point_al = point.getAttribute('aligned').lower()
                point_al = bool(point_al == 'true')
point_x = int(point.getAttribute('x'))
point_y = int(point.getAttribute('y'))
pnt = imgfeatures.AlignmentPoint(point_x, point_y,
point_id, point_id)
pnt.aligned = point_al
frm.addAlignPoint(pnt)
for s in node.getElementsByTagName('star'):
st_x = int(s.getAttribute('x'))
st_y = int(s.getAttribute('y'))
st_name = s.getAttribute('name')
st_id = s.getAttribute('id')
st_r1 = float(s.getAttribute('inner_radius'))
st_r2 = float(s.getAttribute('middle_radius'))
st_r3 = float(s.getAttribute('outer_radius'))
st_ref = bool(int(s.getAttribute('reference')))
st_mag = {}
for attrname in s.attributes.keys():
if attrname[0:4] == 'mag_':
bandname = attrname[4:]
magval = float(s.getAttribute(attrname))
st_mag[bandname] = magval
else:
continue
st = imgfeatures.Star(st_x, st_y,
st_name, st_id)
st.r1 = st_r1
st.r2 = st_r2
st.r3 = st_r3
st.reference = st_ref
st.magnitude = st_mag
frm.addStar(st)
for star in node.getElementsByTagName('align-point'):
                point_id = star.getAttribute('id')
                point_al = star.getAttribute('aligned').lower()
                point_al = bool(point_al == 'true')
                point_x = int(star.getAttribute('x'))
                point_y = int(star.getAttribute('y'))
pnt = imgfeatures.AlignmentPoint(point_x, point_y,
point_id, point_id)
pnt.aligned = point_al
frm.alignpoints.append(pnt)
offset_node = node.getElementsByTagName('offset')[0]
offset_x = float(offset_node.getAttribute('x'))
offset_y = float(offset_node.getAttribute('y'))
if 'theta' in offset_node.attributes.keys():
offset_t = float(offset_node.getAttribute('theta'))
else:
offset_t = 0
frm.tool_name = im_name
frm.width = imw
frm.height = imh
frm.mode = dep
frm.setOffset([offset_x, offset_y])
frm.setAngle(offset_t)
frm.setUsed(im_used)
frm.addProperty('frametype', utils.LIGHT_FRAME_TYPE)
framelist.append(frm)
if has_photometry_section:
log.log(repr(self),
'reading photometry section',
level=logging.DEBUG)
time_attr = int(photometry_node.getAttribute('time_type'))
use_image_time = bool(time_attr)
# photometry section
channel_mapping = {}
sels = photometry_node.getElementsByTagName('channel')
for comp in sels:
idx = int(comp.getAttribute('index'))
nme = comp.getAttribute('band')
channel_mapping[idx] = nme
if not channel_mapping:
msg = "No channel mapping in photometry section."
raise ValueError(msg)
else:
use_image_time = self.use_image_time
channel_mapping = lcurves.getComponentTable(dep)
except Exception as exc:
log.log(repr(self),
'An error has occurred while reading the project:' +
'\"' + str(exc) + '\"',
level=logging.ERROR)
raise(exc)
self.imw = imw
self.imh = imh
self.dep = dep
self.light_frames = framelist
self.bias_frames = biasframelist
self.dark_frames = darkframelist
self.flat_frames = flatframelist
self.current_image_idx = current_row
self.master_bias_url = master_bias_url
self.master_dark_url = master_dark_url
self.master_flat_url = master_flat_url
self.master_bias_mul = master_bias_mul
self.master_dark_mul = master_dark_mul
self.master_flat_mul = master_flat_mul
self.use_master_bias = bool(master_bias_checked)
self.use_master_dark = bool(master_dark_checked)
self.use_master_flat = bool(master_flat_checked)
self.bayer_mode = bayer_mode
self.aap_rectangle = (ar_w, ar_h)
self.max_points = max_points
self.min_quality = min_quality
self.use_whole_image = use_whole_image
self.project_directory = current_dir
self.use_image_time = use_image_time
self.channel_mapping = channel_mapping
log.log(repr(self),
'project fully loaded',
level=logging.INFO)
def saveProject(self, project_fname):
doc = minidom.Document()
root = doc.createElement('project')
doc.appendChild(root)
information_node = doc.createElement('information')
bias_frames_node = doc.createElement('bias-frames')
dark_frames_node = doc.createElement('dark-frames')
flat_frames_node = doc.createElement('flat-frames')
pict_frames_node = doc.createElement('frames')
photometry_node = doc.createElement('photometry')
root.appendChild(information_node)
root.appendChild(bias_frames_node)
root.appendChild(dark_frames_node)
root.appendChild(flat_frames_node)
root.appendChild(pict_frames_node)
root.appendChild(photometry_node)
# <information> section
information_node.setAttribute('width', str(int(self.imw)))
information_node.setAttribute('height', str(int(self.imh)))
information_node.setAttribute('mode', str(int(self.dep)))
information_node.setAttribute('bayer-mode', str(int(self.bayer_mode)))
current_dir_node = doc.createElement('current-dir')
current_row_node = doc.createElement('current-row')
master_bias_node = doc.createElement('master-bias')
master_dark_node = doc.createElement('master-dark')
master_flat_node = doc.createElement('master-flat')
min_quality_node = doc.createElement('min-point-quality')
max_points_node = doc.createElement('max-align-points')
align_rect_node = doc.createElement('align-rect')
information_node.appendChild(current_dir_node)
information_node.appendChild(current_row_node)
information_node.appendChild(master_bias_node)
information_node.appendChild(master_dark_node)
information_node.appendChild(master_flat_node)
information_node.appendChild(align_rect_node)
information_node.appendChild(max_points_node)
information_node.appendChild(min_quality_node)
mb_cck_state = self.use_master_bias*2
md_cck_state = self.use_master_dark*2
mf_cck_state = self.use_master_flat*2
current_dir_node.setAttribute('url', str(self.project_directory))
current_row_node.setAttribute('index', str(self.current_image_idx))
master_bias_node.setAttribute('checked', str(mb_cck_state))
master_bias_node.setAttribute('mul', str(self.master_bias_mul))
master_dark_node.setAttribute('checked', str(md_cck_state))
master_dark_node.setAttribute('mul', str(self.master_dark_mul))
master_flat_node.setAttribute('checked', str(mf_cck_state))
master_flat_node.setAttribute('mul', str(self.master_flat_mul))
align_rect_node.setAttribute('width', str(self.aap_rectangle[0]))
align_rect_node.setAttribute('height', str(self.aap_rectangle[1]))
align_rect_node.setAttribute('whole-image', str(self.use_whole_image))
max_points_node.setAttribute('value', str(self.max_points))
min_quality_node.setAttribute('value', str(self.min_quality))
url = doc.createElement('url')
url_txt = doc.createTextNode(str(self.master_bias_url))
url.appendChild(url_txt)
master_bias_node.appendChild(url)
url = doc.createElement('url')
url_txt = doc.createTextNode(str(self.master_dark_url))
url.appendChild(url_txt)
master_dark_node.appendChild(url)
url = doc.createElement('url')
url_txt = doc.createTextNode(str(self.master_flat_url))
url.appendChild(url_txt)
master_flat_node.appendChild(url)
        # <bias-frames> section
for i in self.bias_frames:
im_bias_used = str(i.isUsed())
im_bias_name = str(i.tool_name)
im_bias_page = i.page
im_bias_url = i.url
image_node = doc.createElement('image')
image_node.setAttribute('name', im_bias_name)
image_node.setAttribute('used', im_bias_used)
bias_frames_node.appendChild(image_node)
url = doc.createElement('url')
url_txt = doc.createTextNode(im_bias_url)
url.appendChild(url_txt)
url.setAttribute('page', str(im_bias_page))
image_node.appendChild(url)
        # <dark-frames> section
for i in self.dark_frames:
im_dark_used = str(i.isUsed())
im_dark_name = str(i.tool_name)
im_dark_page = i.page
im_dark_url = i.url
image_node = doc.createElement('image')
image_node.setAttribute('name', im_dark_name)
image_node.setAttribute('used', im_dark_used)
dark_frames_node.appendChild(image_node)
url = doc.createElement('url')
url_txt = doc.createTextNode(im_dark_url)
url.appendChild(url_txt)
url.setAttribute('page', str(im_dark_page))
image_node.appendChild(url)
# <flat-frames> section
for i in self.flat_frames:
im_flat_used = str(i.isUsed())
im_flat_name = str(i.tool_name)
im_flat_page = i.page
im_flat_url = i.url
image_node = doc.createElement('image')
image_node.setAttribute('name', im_flat_name)
image_node.setAttribute('used', im_flat_used)
flat_frames_node.appendChild(image_node)
url = doc.createElement('url')
url_txt = doc.createTextNode(im_flat_url)
url.appendChild(url_txt)
url.setAttribute('page', str(im_flat_page))
image_node.appendChild(url)
# <frames> section
for img in self.light_frames:
im_used = str(img.isUsed())
im_name = str(img.tool_name)
im_page = img.page
im_url = img.url
image_node = doc.createElement('image')
image_node.setAttribute('name', im_name)
image_node.setAttribute('used', im_used)
pict_frames_node.appendChild(image_node)
for point in img.alignpoints:
point_node = doc.createElement('align-point')
point_node.setAttribute('x', str(int(point.x)))
point_node.setAttribute('y', str(int(point.y)))
point_node.setAttribute('id', str(point.id))
point_node.setAttribute('name', str(point.name))
point_node.setAttribute('aligned', str(point.aligned))
image_node.appendChild(point_node)
for s in img.stars:
star_node = doc.createElement('star')
star_node.setAttribute('x', str(int(s.x)))
star_node.setAttribute('y', str(int(s.y)))
star_node.setAttribute('name', str(s.name))
star_node.setAttribute('id', str(s.id))
star_node.setAttribute('inner_radius', str(float(s.r1)))
star_node.setAttribute('middle_radius', str(float(s.r2)))
star_node.setAttribute('outer_radius', str(float(s.r3)))
star_node.setAttribute('reference', str(int(s.reference)))
for band in s.magnitude:
name = "mag_{}".format(band)
mag = str(float(s.magnitude[band]))
star_node.setAttribute(name, mag)
image_node.appendChild(star_node)
offset_node = doc.createElement('offset')
offset_node.setAttribute('x', str(float(img.offset[0])))
offset_node.setAttribute('y', str(float(img.offset[1])))
offset_node.setAttribute('theta', str(float(img.angle)))
image_node.appendChild(offset_node)
url = doc.createElement('url')
url_txt = doc.createTextNode(im_url)
url.appendChild(url_txt)
url.setAttribute('page', str(im_page))
image_node.appendChild(url)
# photometry section
img_tm = int(self.use_image_time)
photometry_node.setAttribute('time_type', str(img_tm))
# photometry section
for ch in self.channel_mapping:
channel_node = doc.createElement('channel')
channel_node.setAttribute('index', str(ch))
channel_node.setAttribute('band', str(self.channel_mapping[ch]))
photometry_node.appendChild(channel_node)
try:
f = open(project_fname, 'w')
f.write(doc.toprettyxml(' ', '\n'))
f.close()
except IOError as err:
log.log(repr(self),
"Cannot save the project: " + str(err),
level=logging.ERROR)
raise err
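# Layout of the XML project file written by saveProject() above and read back
# by the loader (attribute values are placeholders, not literal defaults):
#
#   <project>
#     <information width=".." height=".." mode=".." bayer-mode="..">
#       <current-dir url=".."/>
#       <current-row index=".."/>
#       <master-bias checked=".." mul=".."><url>..</url></master-bias>
#       <master-dark checked=".." mul=".."><url>..</url></master-dark>
#       <master-flat checked=".." mul=".."><url>..</url></master-flat>
#       <align-rect width=".." height=".." whole-image=".."/>
#       <max-align-points value=".."/>
#       <min-point-quality value=".."/>
#     </information>
#     <bias-frames><image name=".." used=".."><url page="..">..</url></image>...</bias-frames>
#     <dark-frames>...</dark-frames>
#     <flat-frames>...</flat-frames>
#     <frames>
#       <image name=".." used="..">
#         <align-point x=".." y=".." id=".." name=".." aligned=".."/>
#         <star x=".." y=".." name=".." id=".." inner_radius=".." middle_radius=".." outer_radius=".." reference=".." mag_*=".."/>
#         <offset x=".." y=".." theta=".."/>
#         <url page="..">..</url>
#       </image>
#     </frames>
#     <photometry time_type=".."><channel index=".." band=".."/></photometry>
#   </project>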
| mauritiusdadd/lxnstack | lxnstack/projects.py | Python | gpl-3.0 | 31,137 |
# -*- Mode: Python -*-
import unittest
import glib
import gio
class TestResolver(unittest.TestCase):
def setUp(self):
self.resolver = gio.resolver_get_default()
def test_resolver_lookup_by_name(self):
addresses = self.resolver.lookup_by_name("pygtk.org", cancellable=None)
self.failUnless(isinstance(addresses[0], gio.InetAddress))
def test_resolver_lookup_by_address(self):
address = gio.inet_address_new_from_string("8.8.8.8")
dns = self.resolver.lookup_by_address(address, cancellable=None)
self.failUnlessEqual(dns, "google-public-dns-a.google.com")
def test_resolver_lookup_by_name_async(self):
def callback(resolver, result):
try:
addresses = resolver.lookup_by_name_finish(result)
self.failUnless(isinstance(addresses[0], gio.InetAddress))
finally:
loop.quit()
self.resolver.lookup_by_name_async(callback, "pygtk.org")
loop = glib.MainLoop()
loop.run()
def test_resolver_lookup_by_address_async(self):
def callback(resolver, result):
try:
dns = resolver.lookup_by_address_finish(result)
self.failUnlessEqual(dns, "google-public-dns-b.google.com")
finally:
loop.quit()
address = gio.inet_address_new_from_string("8.8.4.4")
self.resolver.lookup_by_address_async(callback, address)
loop = glib.MainLoop()
loop.run()
""" Commented out because this requires an active internet connection and a
router that supports SRV lookups
def test_resolver_lookup_service(self):
targets = self.resolver.lookup_service("xmpp-client", "tcp", "google.com")
self.failUnless(isinstance(targets[0], gio.SrvTarget))
def test_resolver_lookup_service_async(self):
def callback(resolver, result):
try:
targets = resolver.lookup_service_finish(result)
self.failUnless(isinstance(targets[0], gio.SrvTarget))
finally:
loop.quit()
self.resolver.lookup_service_async(callback, "xmpp-client", "tcp", "google.com")
loop = glib.MainLoop()
loop.run()
"""
| Distrotech/pygobject | tests/test_gresolver.py | Python | lgpl-2.1 | 2,263 |
#!/usr/bin/env python
from __future__ import unicode_literals
from datetime import datetime, date
from unittest import TestCase
from wtforms.form import Form
from wtforms.ext.dateutil.fields import DateTimeField, DateField
class DummyPostData(dict):
def getlist(self, key):
v = self[key]
if not isinstance(v, (list, tuple)):
v = [v]
return v
class DateutilTest(TestCase):
class F(Form):
a = DateTimeField()
b = DateField(default=lambda: date(2004, 9, 12))
c = DateField(parse_kwargs=dict(yearfirst=True, dayfirst=False))
def test_form_input(self):
f = self.F(DummyPostData(a='2008/09/12 4:17 PM', b='04/05/06', c='04/05/06'))
self.assertEqual(f.a.data, datetime(2008, 9, 12, 16, 17))
self.assertEqual(f.a._value(), '2008/09/12 4:17 PM')
self.assertEqual(f.b.data, date(2006, 4, 5))
self.assertEqual(f.c.data, date(2004, 5, 6))
self.assertTrue(f.validate())
f = self.F(DummyPostData(a='Grok Grarg Rawr'))
self.assertFalse(f.validate())
def test_blank_input(self):
f = self.F(DummyPostData(a='', b=''))
self.assertEqual(f.a.data, None)
self.assertEqual(f.b.data, None)
self.assertFalse(f.validate())
def test_defaults_display(self):
f = self.F(a=datetime(2001, 11, 15))
self.assertEqual(f.a.data, datetime(2001, 11, 15))
self.assertEqual(f.a._value(), '2001-11-15 00:00')
self.assertEqual(f.b.data, date(2004, 9, 12))
self.assertEqual(f.b._value(), '2004-09-12')
self.assertEqual(f.c.data, None)
self.assertTrue(f.validate())
def test_render(self):
f = self.F()
self.assertEqual(f.b(), ur'<input id="b" name="b" type="text" value="2004-09-12">')
if __name__ == '__main__':
from unittest import main
main()
| Khan/wtforms | tests/ext_dateutil.py | Python | bsd-3-clause | 1,875 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSmoof(RPackage):
"""Provides generators for a high number of both single- and
multi- objective test functions which are frequently used for the
benchmarking of (numerical) optimization algorithms. Moreover, it offers
a set of convenient functions to generate, plot and work with objective
functions."""
homepage = "http://github.com/jakobbossek/smoof"
url = "https://cran.r-project.org/src/contrib/smoof_1.5.1.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/smoof"
version('1.5.1', 'c3e3b5dafed34608f933ae255cf49054')
version('1.5', 'b371bde2724eade5a6d4d808fa3ad269')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-plot3d', type=('build', 'run'))
depends_on('r-plotly', type=('build', 'run'))
depends_on('r-mco', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-rjsonio', type=('build', 'run'))
depends_on('r-rcpparmadillo', type=('build', 'run'))
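# Typical Spack command-line usage for this package definition (illustrative):
#
#     spack info r-smoof      # show the versions and dependencies declared above
#     spack install r-smoof   # build and install the package and its R dependencies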
| krafczyk/spack | var/spack/repos/builtin/packages/r-smoof/package.py | Python | lgpl-2.1 | 2,511 |
# -*- coding: utf-8 -*-
#
# ,---------, ____ _ __
# | ,-^-, | / __ )(_) /_______________ _____ ___
# | ( O ) | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# | / ,--' | / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# +------` /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2021-2022 Bitcraze AB
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, in version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import numpy as np
import numpy.typing as npt
import scipy.optimize
from cflib.localization.lighthouse_types import Pose
class LighthouseSystemAligner:
@classmethod
def align(cls, origin: npt.ArrayLike, x_axis: list[npt.ArrayLike], xy_plane: list[npt.ArrayLike],
bs_poses: dict[int, Pose]) -> dict[int, Pose]:
"""
Align a coordinate system with the physical world. Finds the transform from the
current reference frame to one that is aligned with measured positions, and transforms base station
poses to the new coordinate system.
:param origin: The position of the desired origin in the current reference frame
:param x_axis: One or more positions on the desired positive X-axis (X>0, Y=Z=0) in the current
reference frame
        :param xy_plane: One or more positions in the desired XY-plane (Z=0) in the current reference frame
:param bs_poses: a dictionary with the base station poses in the current reference frame
:return: a dictionary with the base station poses in the desired reference frame
"""
raw_transformation = cls._find_transformation(origin, x_axis, xy_plane)
transformation = cls._de_flip_transformation(raw_transformation, x_axis, bs_poses)
result: dict[int, Pose] = {}
for bs_id, pose in bs_poses.items():
result[bs_id] = transformation.rotate_translate_pose(pose)
return result
@classmethod
def _find_transformation(cls, origin: npt.ArrayLike, x_axis: list[npt.ArrayLike],
xy_plane: list[npt.ArrayLike]) -> Pose:
"""
Finds the transformation from the current reference frame to a desired reference frame based on measured
positions of the desired reference frame.
:param origin: The position of the desired origin in the current reference frame
:param x_axis: One or more positions on the desired positive X-axis (X>0, Y=Z=0) in the current
reference frame
        :param xy_plane: One or more positions in the desired XY-plane (Z=0) in the current reference frame
:return: The transformation from the current reference frame to the desired reference frame. Note: the
solution may be flipped.
"""
args = (origin, x_axis, xy_plane)
x0 = np.zeros((6))
result = scipy.optimize.least_squares(cls._calc_residual,
x0, verbose=0,
jac_sparsity=None,
x_scale='jac',
ftol=1e-8,
method='trf',
max_nfev=10,
args=args)
return cls._Pose_from_params(result.x)
@classmethod
def _calc_residual(cls, params, origin: npt.ArrayLike, x_axis: list[npt.ArrayLike], xy_plane: list[npt.ArrayLike]):
transform = cls._Pose_from_params(params)
origin_diff = transform.rotate_translate(origin)
x_axis_diff = map(lambda x: transform.rotate_translate(x), x_axis)
xy_plane_diff = map(lambda x: transform.rotate_translate(x), xy_plane)
residual_origin = origin_diff
# Points on X-axis: ignore X
x_axis_residual = list(map(lambda x: x[1:3], x_axis_diff))
# Points in the XY-plane: ignore X and Y
xy_plane_residual = list(map(lambda x: x[2], xy_plane_diff))
residual = np.concatenate((np.ravel(residual_origin), np.ravel(x_axis_residual), np.ravel(xy_plane_residual)))
return residual
@classmethod
def _Pose_from_params(cls, params: npt.ArrayLike) -> Pose:
return Pose.from_rot_vec(R_vec=params[:3], t_vec=params[3:])
@classmethod
def _de_flip_transformation(cls, raw_transformation: Pose, x_axis: list[npt.ArrayLike],
bs_poses: dict[int, Pose]) -> Pose:
"""
        Investigates a transformation and flips it if needed. This method assumes that
1. all base stations are at Z>0
2. x_axis samples are taken at X>0
"""
transformation = raw_transformation
        # X-axis poses should be on the positive X-axis, check that the "mean" of the x-axis points ends up at X>0
x_axis_mean = np.mean(x_axis, axis=0)
if raw_transformation.rotate_translate(x_axis_mean)[0] < 0.0:
flip_around_z_axis = Pose.from_rot_vec(R_vec=(0.0, 0.0, np.pi))
transformation = flip_around_z_axis.rotate_translate_pose(transformation)
# Base station poses should be above the floor, check the first one
bs_pose = list(bs_poses.values())[0]
if raw_transformation.rotate_translate(bs_pose.translation)[2] < 0.0:
flip_around_x_axis = Pose.from_rot_vec(R_vec=(np.pi, 0.0, 0.0))
transformation = flip_around_x_axis.rotate_translate_pose(transformation)
return transformation
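# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the align() entry point is meant to be called. All ids, positions
# and rotation vectors below are made-up sample values, not real measurements.
if __name__ == '__main__':
    # Two base station poses in an arbitrary starting reference frame
    example_bs_poses = {
        0: Pose.from_rot_vec(R_vec=(0.0, 0.0, 0.0), t_vec=(1.0, 0.5, 2.0)),
        1: Pose.from_rot_vec(R_vec=(0.0, 0.0, np.pi / 2), t_vec=(-1.0, 0.5, 2.0)),
    }
    # Measured positions in that same frame: the desired origin, a point on the
    # desired positive X-axis and two points in the desired XY-plane
    example_origin = [0.1, 0.2, 0.0]
    example_x_axis = [[1.1, 0.2, 0.0]]
    example_xy_plane = [[0.1, 1.2, 0.0], [1.1, 1.2, 0.0]]
    aligned_poses = LighthouseSystemAligner.align(
        example_origin, example_x_axis, example_xy_plane, example_bs_poses)
    for bs_id, pose in aligned_poses.items():
        print(bs_id, pose.translation)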
| bitcraze/crazyflie-lib-python | cflib/localization/lighthouse_system_aligner.py | Python | gpl-2.0 | 6,025 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from .operations.metrics_operations import MetricsOperations
from .operations.events_operations import EventsOperations
from .operations.query_operations import QueryOperations
from . import models
class ApplicationInsightsDataClientConfiguration(Configuration):
"""Configuration for ApplicationInsightsDataClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if not base_url:
base_url = 'https://api.applicationinsights.io/v1'
super(ApplicationInsightsDataClientConfiguration, self).__init__(base_url)
self.add_user_agent('azure-applicationinsights/{}'.format(VERSION))
self.credentials = credentials
class ApplicationInsightsDataClient(SDKClient):
"""Composite Swagger for Application Insights Data Client
:ivar config: Configuration for client.
:vartype config: ApplicationInsightsDataClientConfiguration
:ivar metrics: Metrics operations
:vartype metrics: azure.applicationinsights.operations.MetricsOperations
:ivar events: Events operations
:vartype events: azure.applicationinsights.operations.EventsOperations
:ivar query: Query operations
:vartype query: azure.applicationinsights.operations.QueryOperations
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(
self, credentials, base_url=None):
self.config = ApplicationInsightsDataClientConfiguration(credentials, base_url)
super(ApplicationInsightsDataClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = 'v1'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.metrics = MetricsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.events = EventsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.query = QueryOperations(
self._client, self.config, self._serialize, self._deserialize)
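# --- Illustrative usage sketch (not part of the generated client) ---
# Obtaining a credentials object is environment specific (service principal,
# CLI profile, ...) and is therefore only assumed here; the application id and
# query payload below are likewise made-up examples, and the execute() call
# reflects the QueryOperations wired up above as far as known.
#
#     client = ApplicationInsightsDataClient(credentials)
#     body = models.QueryBody(query='requests | take 5')
#     result = client.query.execute('<application-id>', body)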
| Azure/azure-sdk-for-python | sdk/applicationinsights/azure-applicationinsights/azure/applicationinsights/application_insights_data_client.py | Python | mit | 3,215 |
# -*- coding: utf-8 -*-
import terrariumLogging
logger = terrariumLogging.logging.getLogger(__name__)
import inspect
from importlib import import_module
import sys
from pathlib import Path
from hashlib import md5
from operator import itemgetter
from datetime import datetime, timedelta
from time import time
from gevent import sleep
from func_timeout import func_timeout, FunctionTimedOut
import re
import math
#import glob
import cv2
# pip install retry
from retry import retry
# pip install Pillow
from PIL import Image, ImageDraw, ImageFont
# pip install piexif
import piexif
from terrariumUtils import terrariumUtils, terrariumCache, classproperty
class terrariumWebcamException(TypeError):
  '''There is a problem with loading a hardware webcam.'''
def __init__(self, message, *args):
self.message = message
super().__init__(message, *args)
class terrariumWebcamLoadingException(terrariumWebcamException):
pass
class terrariumWebcamUpdateException(terrariumWebcamException):
pass
class terrariumWebcamActionException(terrariumWebcamException):
pass
# https://www.bnmetrics.com/blog/factory-pattern-in-python3-simple-version
class terrariumWebcam(object):
HARDWARE = None
NAME = None
VALID_SOURCE = None
INFO_SOURCE = None
__STATIC_LOCATION = Path(__file__).parent.parent.parent / 'webcam'
#.parent.parent.joinpath('app/base/static/webcams/')
_STORE_LOCATION = Path('/dev/shm/webcam/')
_TILE_LOCATION = 'tiles/'
__ARCHIVE_LOCATION = __STATIC_LOCATION / 'archive/'
__TILE_SIZE = 256
__JPEG_QUALITY = 95
__FONT_SIZE = 10
__OFFLINE = 'offline'
__ONLINE = 'online'
__UPDATE_TIMEOUT = 1
__VALID_ROTATIONS = ['0','90','180','270','h','v']
_WARM_UP = 2
@classproperty
def available_hardware(__cls__):
__CACHE_KEY = 'known_webcams'
cache = terrariumCache()
data = cache.get_data(__CACHE_KEY)
if data is None:
data = {}
# Start dynamically loading sensors (based on: https://www.bnmetrics.com/blog/dynamic-import-in-python3)
for file in sorted(Path(__file__).parent.glob('*_webcam.py')):
imported_module = import_module( '.' + file.stem, package='{}'.format(__name__))
for i in dir(imported_module):
attribute = getattr(imported_module, i)
if inspect.isclass(attribute) and attribute != terrariumWebcam and issubclass(attribute, terrariumWebcam):
setattr(sys.modules[__name__], file.stem, attribute)
data[attribute.HARDWARE] = attribute
cache.set_data(__CACHE_KEY,data,-1)
return data
@classproperty
def available_webcams(__cls__):
data = []
for (hardware_type, webcam) in __cls__.available_hardware.items():
data.append({'hardware' : hardware_type, 'name' : webcam.NAME})
return sorted(data, key=itemgetter('name'))
# Return polymorph webcam....
  def __new__(cls, id, address, name = '', rotation = '0', width = 640, height = 480, awb = 'auto'):
known_webcams = terrariumWebcam.available_hardware
# Check based on entered address, not type
for webcam_device in known_webcams:
if re.search(known_webcams[webcam_device].VALID_SOURCE, address, re.IGNORECASE):
return super(terrariumWebcam, cls).__new__(known_webcams[webcam_device])
raise terrariumWebcamException(f'Webcam url \'{address}\' is not valid! Please check your source')
def __init__(self, id, address, name = '', width = 640, height = 480, rotation = '0', awb = 'auto'):
self._device = {'device' : None,
'id' : None,
'address' : None,
'name' : None,
'rotation' : None,
'resolution' : None,
'awb' : None,
'last_update' : None,
'state' : True,
'max_zoom' : None,}
self.id = id
self.name = name
self.resolution = (width,height)
self.rotation = rotation
self.awb = awb
self.__last_archive_image = self.__get_last_archive_image()
self.__compare_image = None
# This will trigger a load hardware call when the address changes
self.address = address
store_location = Path(self._STORE_LOCATION).joinpath(self.id)
store_location.mkdir(parents=True,exist_ok=True)
sym_link = self.__STATIC_LOCATION.joinpath(self.id)
if not sym_link.is_symlink():
sym_link.symlink_to(store_location,target_is_directory=True)
if not self.live:
store_location.joinpath(self._TILE_LOCATION).mkdir(parents=True,exist_ok=True)
def __repr__(self):
return f'{self.NAME} named \'{self.name}\' at address \'{self.address}\''
@retry(tries=3, delay=0.5, max_delay=2)
def load_hardware(self):
try:
hardware = self._load_hardware()
except Exception as ex:
raise terrariumWebcamLoadingException(f'Unable to load webcam {self}: {ex}.')
if hardware is None:
raise terrariumWebcamLoadingException(f'Unable to load webcam {self}: Did not return a device.')
self._device['device'] = hardware
def __get_last_archive_image(self):
# Today archive path:
archive = self.__ARCHIVE_LOCATION / self.id / f'{datetime.now().strftime("%Y/%m/%d")}'
files = sorted(archive.glob('*.jpg'))
if len(files) == 0:
# Yesterday archive path:
archive = self.__ARCHIVE_LOCATION / self.id / f'{(datetime.now()-timedelta(days=1)).strftime("%Y/%m/%d")}'
files = sorted(archive.glob('*.jpg'))
if len(files) == 0:
# No archive files found in 24-48 hours history
return None
return files[-1]
def __rotate(self):
# Rotate image if needed
if self.__raw_image is None:
return
if '90' == self.rotation:
self.__raw_image = self.__raw_image.transpose(Image.ROTATE_90)
elif '180' == self.rotation:
self.__raw_image = self.__raw_image.transpose(Image.ROTATE_180)
elif '270' == self.rotation:
self.__raw_image = self.__raw_image.transpose(Image.ROTATE_270)
elif 'h' == self.rotation:
self.__raw_image = self.__raw_image.transpose(Image.FLIP_TOP_BOTTOM)
elif 'v' == self.rotation:
self.__raw_image = self.__raw_image.transpose(Image.FLIP_LEFT_RIGHT)
logger.debug('Rotated raw image %s to %s' % (self.name,self.rotation))
def __set_timestamp(self, image):
# Get the image dimensions
source_width, source_height = image.size
# Select font
font = ImageFont.truetype('fonts/DejaVuSans.ttf',self.__FONT_SIZE)
# Draw on image
draw = ImageDraw.Draw(image)
# Create black box on the bottom of the image
draw.rectangle([0,source_height-(self.__FONT_SIZE+2),source_width,source_height],fill='black')
# Draw the current time stamp on the image
draw.text((1, source_height-(self.__FONT_SIZE+1)), ('NoName' if self.name is None else self.name) + ' @ ' + (datetime.now()).strftime('%d/%m/%Y %H:%M:%S') ,(255,255,255),font=font)
def __tile_image(self):
starttime = time()
# Original width
source_width, source_height = self.__raw_image.size
# Calc new square canvas size
longest_side = float(max(source_width,source_height))
max_size = float(math.pow(2,math.ceil(math.log(longest_side,2))))
# Set canvas dimensions
canvas_width = canvas_height = max_size
resize_factor = max_size / longest_side
# Set raw image new dimensions
source_width *= resize_factor
source_height *= resize_factor
# Calculate the max zoom factor
zoom_factor = int(math.log(max_size/self.__TILE_SIZE,2))
self._device['max_zoom'] = zoom_factor
#self.__max_zoom = zoom_factor
logger.debug('Tiling image with source resolution %s, from %sx%s with resize factor %s in %s steps' %
('{}x{}'.format(self.width ,self.height), source_width,source_height,resize_factor, zoom_factor))
# as long as there is a new layer, continue
while zoom_factor >= 0:
# Create black canvas on zoom factor dimensions
logger.debug('Creating new black canvas with dimensions %sx%s' % (canvas_width,canvas_height))
canvas = Image.new("RGB", ((int(round(canvas_width)),int(round(canvas_height)))), "black")
# Scale the raw image to the zoomfactor dimensions
logger.debug('Scale raw image to new canvas size (%sx%s)' % (canvas_width,canvas_height))
source = self.__raw_image.resize((int(round(source_width)),int(round(source_height))))
# Set the timestamp on resized image
self.__set_timestamp(source)
# Calculate the center in the canvas for pasting raw image
paste_center_position = (int(round((canvas_width - source_width) / 2)),int(round((canvas_height - source_height) / 2)))
logger.debug('Pasting resized image to center of canvas at position %s' % (paste_center_position,))
canvas.paste(source,paste_center_position)
# Loop over the canvas to create the tiles
logger.debug('Creating the lose tiles with dimensions %sx%s' % (canvas_width, canvas_height,))
for row in range(0,int(math.ceil(canvas_height/self.__TILE_SIZE))):
for column in range(0,int(math.ceil(canvas_width/self.__TILE_SIZE))):
crop_size = ( int(row*self.__TILE_SIZE), int(column*self.__TILE_SIZE) ,int((row+1)*self.__TILE_SIZE), int((column+1)*self.__TILE_SIZE))
#logger.debug('Cropping image from position %s' % (crop_size,))
tile = canvas.crop(crop_size)
#logger.debug('Saving cropped image to %s' % (terrariumWebcamSource.TILE_LOCATION + self.__id + '_tile_' + str(zoom_factor) + '_' + str(row) + '_' + str(column) + '.jpg',))
tile_file_name = self.raw_image_path.parent.joinpath('tiles','tile_{}_{}_{}.jpg'.format(zoom_factor,row,column))
#print('Save tile: {}'.format(tile_file_name))
tile.save(tile_file_name,'jpeg',quality=self.__JPEG_QUALITY)
logger.debug('Done saving {}'.format(tile_file_name))
# Scale down by 50%
canvas_width /= 2.0
canvas_height /= 2.0
source_width /= 2.0
source_height /= 2.0
zoom_factor -= 1
logger.debug('Done tiling webcam image \'%s\' in %.5f seconds' % (self.name,time()-starttime))
def __set_offline_image(self):
def draw_text_center(im, draw, text, font, **kwargs):
text_height = text_top = None
linecounter = 0
for line in text:
text_size = draw.textsize(line, font)
if text_height is None:
text_height = len(text) * ( text_size[1])
text_top = (im.size[1] - text_height) / 2
draw.text(
((im.size[0] - text_size[0]) / 2, (text_top + (linecounter * text_height)) / 2),
line, font=font, **kwargs)
linecounter += 1
raw_image = Image.open(Path(__file__).parent.joinpath('images/webcam_offline.png'))
mask = Image.open(Path(__file__).parent.joinpath('images/mask_offline.png'))
draw = ImageDraw.Draw(mask)
font = ImageFont.truetype('fonts/DejaVuSans.ttf',40)
text = ['Offline since' + ':',datetime.now().strftime('%A %d %B %Y'),datetime.now().strftime('%H:%M:%S')]
draw_text_center(mask,draw,text,font)
mask_width, mask_height = mask.size
source_width, source_height = raw_image.size
raw_image.paste(mask, (int((source_width/2)-(mask_width/2)),int((source_height/2)-(mask_height/2))), mask)
# Upscale the error image, so the zoomfactors are still working...
logger.debug('Resize error image from {}x{} to {}x{} keeping aspect ratio.'.format(source_width, source_height,self.width, self.height))
raw_image.thumbnail( (self.width, self.height))
return raw_image
@property
def __exit_data(self):
# Add some exif data to the image
zeroth_ifd = {
piexif.ImageIFD.Artist: 'TerrariumPI',
piexif.ImageIFD.XResolution: (self.width, 1),
piexif.ImageIFD.YResolution: (self.height, 1),
piexif.ImageIFD.Software: 'TerrariumPI',
piexif.ImageIFD.ImageDescription: f'Webcam image from {self}',
piexif.ImageIFD.DateTime: datetime.now().strftime('%Y-%m-%d %H:%m:%S'),
piexif.ImageIFD.Copyright: f'(c) {datetime.now().year} - TerrariumPI',
}
exif_ifd = {
piexif.ExifIFD.DateTimeOriginal: datetime.now().strftime('%Y-%m-%d %H:%m:%S'),
}
exif_dict = {'0th': zeroth_ifd, 'Exif': exif_ifd}
try:
exif_bytes = piexif.dump(exif_dict)
except Exception as ex:
return None
return exif_bytes
@property
def address(self):
return self._device['address']
@address.setter
def address(self, value):
value = terrariumUtils.clean_address(value)
if value is not None and '' != value:
if self.address != value:
self._device['address'] = value
self.load_hardware()
@property
def awb(self):
return self._device['awb']
@awb.setter
def awb(self, value):
if value is not None and '' != str(value).strip():
self._device['awb'] = str(value).strip()
@property
def device(self):
return self._device['device']
@property
def id(self):
if self._device['id'] is None:
self._device['id'] = md5('{}{}'.format(self.HARDWARE, self.address).encode()).hexdigest()
return self._device['id']
@id.setter
def id(self, value):
if value is not None and '' != str(value).strip():
self._device['id'] = str(value).strip()
@property
def height(self):
return self._device['resolution'][1]
@property
def name(self):
return self._device['name']
@name.setter
def name(self, value):
if value is not None and '' != str(value).strip():
self._device['name'] = str(value).strip()
@property
def resolution(self):
return self._device['resolution']
@resolution.setter
def resolution(self, value):
if len(value) == 2:
self._device['resolution'] = value
@property
def rotation(self):
return self._device['rotation']
@rotation.setter
def rotation(self, value):
value = value.lower()
if value is not None and str(value).strip() in self.__VALID_ROTATIONS:
self._device['rotation'] = str(value).strip()
@property
def width(self):
return self._device['resolution'][0]
@property
def state(self):
return terrariumUtils.is_true(self._device['state'])
@property
def value(self):
return 'online' if self.state else 'offline'
@property
def live(self):
return self.HARDWARE.lower().endswith('-live')
@property
def last_update(self):
return self._device['last_update']
@property
def raw_image_path(self):
return self._STORE_LOCATION.joinpath(self.id,f'{self.id}_raw.jpg')
@property
def raw_archive_path(self):
return self.__ARCHIVE_LOCATION.joinpath(self.id, datetime.now().strftime('%Y/%m/%d'), f'{self.id}_archive_{int(time())}.jpg')
@retry(tries=3, delay=0.5, max_delay=2)
def update(self, relays = []):
if self._device['last_update'] is None or (datetime.now() - self._device['last_update']).total_seconds() > self.__UPDATE_TIMEOUT:
if len(relays) > 0:
for relay in relays:
relay.on()
sleep(1)
try:
image = func_timeout(10, self._get_raw_data)
except FunctionTimedOut:
logger.error(f'Webcam {self} timed out after 10 seconds during updating...')
image = False
# TODO: Need to raise an error, for the retry action?
# except Exception as ex:
# logger.error(f'Webcam {self} has exception: {ex}')
# image = False
if len(relays) > 0:
for relay in relays:
relay.off()
if image is False:
# Camera is offline!!
logger.warning('Webcam {} has errors!'.format(self.name))
if self.state:
self._device['state'] = False
logger.error('Webcam {} has gone offline! Please check your webcam connections.'.format(self.name))
self.__raw_image = self.__set_offline_image()
self.__tile_image()
self.__raw_image.save(self.raw_image_path,'jpeg', quality=self.__JPEG_QUALITY)
return False
self._device['state'] = True
self.__raw_image = Image.open(image)
# Test if image is correctly loaded....
self.__raw_image.getexif()
# After here, no errors should happen, the image data should be save and correct
if not self.live:
try:
self.__rotate()
self.__tile_image()
except Exception as ex:
logger.error(f'Could not process webcam image: {ex}')
return
self.__raw_image.save(self.raw_image_path,'jpeg', quality=self.__JPEG_QUALITY, exif=self.__exit_data)
self._device['last_update'] = datetime.now()
return self.value
def archive(self,timeout):
if not self.state:
return
archive = self.__last_archive_image is None or int(time() - self.__last_archive_image.stat().st_mtime) >= timeout
if archive:
self.__last_archive_image = self.raw_archive_path
self.__last_archive_image.parent.mkdir(parents=True,exist_ok=True)
self.__raw_image.save(self.__last_archive_image,'jpeg', quality=self.__JPEG_QUALITY, exif=self.__exit_data)
#self.__environment.notification.message('webcam_archive',self.get_data(),[archive_image])
def motion_capture(self, motion_frame = 'last', motion_threshold = 25, motion_area = 500, motion_boxes = 'green'):
if not self.state:
return
# https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
#try:
current_image = cv2.imread(str(self.raw_image_path))
current_image = cv2.cvtColor(current_image, cv2.COLOR_BGR2GRAY)
current_image = cv2.GaussianBlur(current_image, (21, 21), 0)
if self.__compare_image is None or self.__compare_image.shape[0] != current_image.shape[0] or self.__compare_image.shape[1] != current_image.shape[1]:
# If we have no previous image to compare, just set it to the current and we are done.
      # OR when the dimensions change. This will give an error when comparing...
self.__compare_image = current_image
return
threshold = cv2.threshold(cv2.absdiff(self.__compare_image, current_image), int(motion_threshold), 255, cv2.THRESH_BINARY)[1]
threshold = cv2.dilate(threshold, None, iterations=2)
# Different OpenCV versions (docker vs native)
try:
(cnts ,_) = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except:
(_,cnts ,_) = cv2.findContours(threshold.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# don't draw if motion boxes is disabled
# Color Red and Blue are swapped... very strange...
box_color = None
if 'red' == motion_boxes:
box_color = (0, 0, 255)
elif 'green' == motion_boxes:
box_color = (0, 255, 0)
elif 'blue' == motion_boxes:
box_color = (255, 0, 0)
    # Reread the current image, since the copy above was altered by the motion-detection filters
raw_image = cv2.imread(str(self.raw_image_path))
# loop over the contours
motion_detected = False
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < motion_area:
continue
motion_detected = True
# compute the bounding box for the contour, draw it on the frame with the selected color,
if box_color is not None:
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(raw_image, (x, y), (x + w, y + h), box_color, 2)
if motion_detected:
self.__last_archive_image = self.raw_archive_path
self.__last_archive_image.parent.mkdir(parents=True,exist_ok=True)
cv2.imwrite(str(self.raw_archive_path),raw_image)
# Store the current image for next comparison round.
self.__compare_image = current_image
logger.info(f'Saved webcam {self} image for archive due to motion detection')
#self.__environment.notification.message('webcam_motion',self.get_data(),[archive_image])
elif 'last' == motion_frame:
# Only store the current frame when we use the 'last' frame option
self.__compare_image = current_image
# TODO: What to stop....?
def stop(self):
pass
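# --- Illustrative usage sketch (not part of the module) ---
# terrariumWebcam acts as a factory: __new__ matches the given address against
# the VALID_SOURCE pattern of every *_webcam.py hardware class in this package
# and returns an instance of the matching subclass. The address below is an
# assumption; it has to match one of the installed hardware classes, otherwise
# a terrariumWebcamException is raised.
#
#     webcam = terrariumWebcam(None, 'http://192.168.1.10/image.jpg',
#                              name='Terrarium front', width=1280, height=720,
#                              rotation='180')
#     webcam.update()       # grab a frame, rotate it and generate the tile set
#     webcam.archive(3600)  # write an archive copy at most once per hour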
| theyosh/TerrariumPI | hardware/webcam/__init__.py | Python | gpl-3.0 | 20,193 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2009 Rene Liebscher
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
#
__revision__ = "$Id: COGS.py,v 1.4 2009/08/07 07:19:18 rliebscher Exp $"
from fuzzy.defuzzify.Base import Base,DefuzzificationException
import fuzzy.set.Singleton
class COGS(Base):
"""defuzzification for singletons."""
def __init__(self, INF=None, ACC=None, failsafe=None,*args,**keywords):
"""
@param failsafe: if is not possible to calculate a center of gravity,
return this value if not None or forward the exception
"""
super(COGS, self).__init__(INF,ACC,*args,**keywords)
self.failsafe = failsafe # which value if COG not calculable
def getValue(self,variable):
"""Defuzzyfication using center of gravity method."""
sum_1,sum_2 = 0.,0.
for adjective in variable.adjectives.values():
# get precomputed adjective set
set = adjective.set
if not isinstance(set,fuzzy.set.Singleton.Singleton):
raise DefuzzificationException("Only Singleton for COGS defuzzification allowed.")
a = (self.INF or self._INF)(set(set.x),adjective.getMembership())
sum_1 += set.x*a
sum_2 += a
try:
if sum_2 == 0.:
raise DefuzzificationException("No result, all singletons set to 0.")
return sum_1/sum_2
except:
            # was not possible to calculate
if self.failsafe is not None:
# user gave us a value to return
return self.failsafe
else:
# forward exception
raise
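# Worked example (illustrative): two singleton adjectives, one at x=1 with
# membership 0.2 and one at x=3 with membership 0.8. Assuming the INF norm
# reduces to the membership value for a singleton evaluated at its own x,
# getValue() yields
#
#     (1 * 0.2 + 3 * 0.8) / (0.2 + 0.8) = 2.6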
| arruda/pyfuzzy | fuzzy/defuzzify/COGS.py | Python | lgpl-3.0 | 2,297 |
from pipeline import *
class SentenceLimiter:
"""
Limit the text, word boundaries and
sentence boundaries of a given document
to the number of sentences given
"""
def run(self, document, number_sentences):
"""
        :param: number_sentences, starts with 0 for the first sentence
"""
boundaries = (document.sentences_boundaries[0][0], document.sentences_boundaries[:number_sentences+1][-1][1])
document.text = document.text[boundaries[0]:boundaries[1]]
document.sentences_boundaries = self._limitSenteceBoundaries(document.sentences_boundaries, boundaries[1])
document.words_boundaries = self._limitWordBoundaries(document.words_boundaries, boundaries[1])
document.entities = self._limitEntities(document.entities, boundaries[1])
document.triples = self._limitTriples(document.triples, boundaries[1])
return document
def _limitSenteceBoundaries(self, sentences_boundaries, maxi):
sentences_boundaries_new = []
for sent in sentences_boundaries:
if sent[1] <= maxi:
sentences_boundaries_new.append(sent)
return sentences_boundaries_new
def _limitEntities(self, entities, maxi):
entities_new = []
for e in entities:
if e.boundaries[1] <= maxi:
entities_new.append(e)
return entities_new
def _limitTriples(self, triples, maxi):
triples_new = []
for t in triples:
if t.sentence_id == 0:
triples_new.append(t)
return triples_new
def _limitWordBoundaries(self, words_boundaries, maxi):
words_boundaries_new = []
for word in words_boundaries:
if word[1] <= maxi:
words_boundaries_new.append(word)
return words_boundaries_new
class MainEntityLimiter:
"""
Remove a document's content if the main entity is not aligned
"""
def run(self, document):
if not document.uri in [i.uri for i in document.entities]:
document = None
return document
class EntityTypeFilter:
"""
Remove all documents that are of a certain type
"""
def __init__(self, all_triples, entities):
"""
:param: input TripleReaderTriples object
        :param: a list of entities that should be filtered
"""
self.wikidata_triples = all_triples
self.entities = entities
def run(self, document):
# P31: instance of
prop_id = 'http://www.wikidata.org/prop/direct/P31'
if any([i for i in self.wikidata_triples.get(document.docid) if i[1] == prop_id and i[2] in self.entities]):
document = None
return document
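# --- Illustrative usage sketch (not part of the module) ---
# The filters are meant to be chained over documents produced by the
# surrounding pipeline package; the `doc` object below is assumed to provide
# the attributes used above (text, sentences_boundaries, entities, ...).
#
#     limiter = SentenceLimiter()
#     main_entity = MainEntityLimiter()
#     doc = limiter.run(doc, 0)      # keep only the first sentence
#     doc = main_entity.run(doc)     # None if the main entity is not aligned
#     if doc is not None:
#         ...                        # continue with the next pipeline stage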
| hadyelsahar/RE-NLG-Dataset | pipeline/filter.py | Python | mit | 2,736 |
__author__ = '[email protected] (Paul Horn)'
#noinspection PyUnresolvedReferences
from .nstrie import NsTrie
| knutwalker/namespacetrie | namespacetrie/__init__.py | Python | gpl-3.0 | 113 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreatePage
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3_generated_Pages_CreatePage_async]
from google.cloud import dialogflowcx_v3
async def sample_create_page():
# Create a client
client = dialogflowcx_v3.PagesAsyncClient()
# Initialize request argument(s)
page = dialogflowcx_v3.Page()
page.display_name = "display_name_value"
request = dialogflowcx_v3.CreatePageRequest(
parent="parent_value",
page=page,
)
# Make the request
response = await client.create_page(request=request)
# Handle the response
print(response)
# [END dialogflow_v3_generated_Pages_CreatePage_async]
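# The sample above is a coroutine; from a synchronous entry point it can be
# driven with asyncio, e.g. (not part of the generated snippet):
#
#     import asyncio
#     asyncio.run(sample_create_page())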
| googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3_generated_pages_create_page_async.py | Python | apache-2.0 | 1,546 |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation.
# Based on ryu/openvswitch agents.
#
# Copyright 2012 Isaku Yamahata <yamahata at private email ne jp>
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import socket
import sys
import time
from quantum.agent.linux import ovs_lib
from quantum.common import config as logging_config
from quantum.common import topics
from quantum.openstack.common import context
from quantum.openstack.common import log as logging
from quantum.openstack.common import rpc
from quantum.plugins.nec.common import config
LOG = logging.getLogger(__name__)
class NECQuantumAgent(object):
def __init__(self, integ_br, root_helper, polling_interval):
'''Constructor.
:param integ_br: name of the integration bridge.
:param root_helper: utility to use when running shell cmds.
:param polling_interval: interval (secs) to check the bridge.
'''
self.int_br = ovs_lib.OVSBridge(integ_br, root_helper)
self.polling_interval = polling_interval
self.host = socket.gethostname()
self.agent_id = 'nec-q-agent.%s' % self.host
self.datapath_id = "0x%s" % self.int_br.get_datapath_id()
# RPC network init
self.context = context.RequestContext('quantum', 'quantum',
is_admin=False)
self.conn = rpc.create_connection(new=True)
def update_ports(self, port_added=[], port_removed=[]):
"""RPC to update information of ports on Quantum Server"""
LOG.info("update ports: added=%s, removed=%s" %
(port_added, port_removed))
try:
rpc.call(self.context,
topics.PLUGIN,
{'method': 'update_ports',
'args': {'topic': topics.AGENT,
'agent_id': self.agent_id,
'datapath_id': self.datapath_id,
'port_added': port_added,
'port_removed': port_removed}})
except Exception as e:
LOG.warn("update_ports() failed.")
return
def _vif_port_to_port_info(self, vif_port):
return dict(id=vif_port.vif_id, port_no=vif_port.ofport,
mac=vif_port.vif_mac)
def daemon_loop(self):
"""Main processing loop for NEC Plugin Agent."""
old_ports = []
while True:
new_ports = []
port_added = []
for vif_port in self.int_br.get_vif_ports():
port_id = vif_port.vif_id
new_ports.append(port_id)
if port_id not in old_ports:
port_info = self._vif_port_to_port_info(vif_port)
port_added.append(port_info)
port_removed = []
for port_id in old_ports:
if port_id not in new_ports:
port_removed.append(port_id)
if port_added or port_removed:
self.update_ports(port_added, port_removed)
else:
LOG.debug("No port changed.")
old_ports = new_ports
time.sleep(self.polling_interval)
def main():
config.CONF(args=sys.argv, project='quantum')
logging_config.setup_logging(config.CONF)
# Determine which agent type to use.
integ_br = config.OVS.integration_bridge
root_helper = config.AGENT.root_helper
polling_interval = config.AGENT.polling_interval
agent = NECQuantumAgent(integ_br, root_helper, polling_interval)
# Start everything.
agent.daemon_loop()
sys.exit(0)
if __name__ == "__main__":
main()
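# Shape of the RPC message emitted by update_ports() (values are illustrative;
# the uuids and MAC address below are made up):
#
#     {'method': 'update_ports',
#      'args': {'topic': topics.AGENT,
#               'agent_id': 'nec-q-agent.compute-1',
#               'datapath_id': '0x0000deadbeef0001',
#               'port_added': [{'id': '<port-uuid>', 'port_no': 5,
#                               'mac': 'fa:16:3e:aa:bb:cc'}],
#               'port_removed': ['<port-uuid>']}}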
| aristanetworks/arista-ovs-quantum | quantum/plugins/nec/agent/nec_quantum_agent.py | Python | apache-2.0 | 4,343 |
# Copyright (C) 2006-2009 Novell Inc. All rights reserved.
# This program is free software; it may be used, copied, modified
# and distributed under the terms of the GNU General Public Licence,
# either version 2, or version 3 (at your option).
from __future__ import print_function
"""Read osc configuration and store it in a dictionary
This module reads and parses ~/.oscrc. The resulting configuration is stored
for later usage in a dictionary named 'config'.
The .oscrc is kept mode 0600, so that it is not publicly readable.
This gives no real security for storing passwords.
If in doubt, use your favourite keyring.
Password is stored in ~/.oscrc as bz2 compressed and base64 encoded, so that it is fairly
large and not easily recognized or remembered by an occasional spectator.
If information is missing, it asks the user questions.
After reading the config, urllib2 is initialized.
The configuration dictionary could look like this:
{'apisrv': 'https://api.opensuse.org/',
'user': 'joe',
'api_host_options': {'api.opensuse.org': {'user': 'joe', 'pass': 'secret'},
'apitest.opensuse.org': {'user': 'joe', 'pass': 'secret',
'http_headers':(('Host','api.suse.de'),
('User','faye'))},
'foo.opensuse.org': {'user': 'foo', 'pass': 'foo'}},
'build-cmd': '/usr/bin/build',
'build-root': '/abuild/oscbuild-%(repo)s-%(arch)s',
'packagecachedir': '/var/cache/osbuild',
'su-wrapper': 'sudo',
}
"""
import bz2
import base64
import os
import re
import sys
import ssl
try:
from http.cookiejar import LWPCookieJar, CookieJar
from http.client import HTTPConnection, HTTPResponse
from io import StringIO
from urllib.parse import urlsplit
from urllib.error import URLError
from urllib.request import HTTPBasicAuthHandler, HTTPCookieProcessor, HTTPPasswordMgrWithDefaultRealm, ProxyHandler
from urllib.request import AbstractHTTPHandler, build_opener, proxy_bypass, HTTPSHandler
except ImportError:
#python 2.x
from cookielib import LWPCookieJar, CookieJar
from httplib import HTTPConnection, HTTPResponse
from StringIO import StringIO
from urlparse import urlsplit
from urllib2 import URLError, HTTPBasicAuthHandler, HTTPCookieProcessor, HTTPPasswordMgrWithDefaultRealm, ProxyHandler
from urllib2 import AbstractHTTPHandler, build_opener, proxy_bypass, HTTPSHandler
from . import OscConfigParser
from osc import oscerr
from .oscsslexcp import NoSecureSSLError
GENERIC_KEYRING = False
GNOME_KEYRING = False
try:
import keyring
GENERIC_KEYRING = True
except:
try:
import gobject
gobject.set_application_name('osc')
import gnomekeyring
if os.environ['GNOME_DESKTOP_SESSION_ID']:
# otherwise gnome keyring bindings spit out errors, when you have
# it installed, but you are not under gnome
# (even though hundreds of gnome-keyring daemons got started in parallel)
# another option would be to support kwallet here
GNOME_KEYRING = gnomekeyring.is_available()
except:
pass
def _get_processors():
"""
get number of processors (online) based on
SC_NPROCESSORS_ONLN (returns 1 if config name does not exist).
"""
try:
return os.sysconf('SC_NPROCESSORS_ONLN')
except ValueError as e:
return 1
DEFAULTS = {'apiurl': 'https://api.opensuse.org',
'user': 'your_username',
'pass': 'your_password',
'passx': '',
'packagecachedir': '/var/tmp/osbuild-packagecache',
'su-wrapper': 'sudo',
# build type settings
'build-cmd': '/usr/bin/build',
'build-type': '', # may be empty for chroot, kvm or xen
'build-root': '/var/tmp/build-root/%(repo)s-%(arch)s',
'build-uid': '', # use the default provided by build
'build-device': '', # required for VM builds
'build-memory': '', # required for VM builds
'build-swap': '', # optional for VM builds
'build-vmdisk-rootsize': '', # optional for VM builds
'build-vmdisk-swapsize': '', # optional for VM builds
'build-vmdisk-filesystem': '', # optional for VM builds
'build-vm-user': '', # optional for VM builds
'build-kernel': '', # optional for VM builds
'build-initrd': '', # optional for VM builds
'build-jobs': _get_processors(),
'builtin_signature_check': '1', # by default use builtin check for verify pkgs
'icecream': '0',
'buildlog_strip_time': '0', # strips the build time from the build log
'debug': '0',
'http_debug': '0',
'http_full_debug': '0',
'http_retries': '3',
'verbose': '1',
'no_preinstallimage': '0',
'traceback': '0',
'post_mortem': '0',
'use_keyring': '0',
'gnome_keyring': '0',
'cookiejar': '~/.osc_cookiejar',
# fallback for osc build option --no-verify
'no_verify': '0',
# enable project tracking by default
'do_package_tracking': '1',
# default for osc build
'extra-pkgs': '',
# default repository
'build_repository': 'openSUSE_Factory',
# default project for branch or bco
'getpac_default_project': 'openSUSE:Factory',
# alternate filesystem layout: have multiple subdirs, where colons were.
'checkout_no_colon': '0',
# change filesystem layout: avoid checkout from within a proj or package dir.
'checkout_rooted': '0',
# local files to ignore with status, addremove, ....
'exclude_glob': '.osc CVS .svn .* _linkerror *~ #*# *.orig *.bak *.changes.vctmp.*',
# whether to keep passwords in plaintext.
'plaintext_passwd': '1',
# limit the age of requests shown with 'osc req list'.
# this is a default only, can be overridden by 'osc req list -D NNN'
            # Use 0 for unlimited.
'request_list_days': 0,
# check for unversioned/removed files before commit
'check_filelist': '1',
# check for pending requests after executing an action (e.g. checkout, update, commit)
'check_for_request_on_action': '0',
# what to do with the source package if the submitrequest has been accepted
'submitrequest_on_accept_action': '',
'request_show_interactive': '0',
'request_show_source_buildstatus': '0',
# if a review is accepted in interactive mode and a group
# was specified the review will be accepted for this group
'review_inherit_group': '0',
'submitrequest_accepted_template': '',
'submitrequest_declined_template': '',
'linkcontrol': '0',
'include_request_from_project': '1',
'local_service_run': '1',
# Maintenance defaults to OBS instance defaults
'maintained_attribute': 'OBS:Maintained',
'maintenance_attribute': 'OBS:MaintenanceProject',
'maintained_update_project_attribute': 'OBS:UpdateProject',
'show_download_progress': '0',
# path to the vc script
'vc-cmd': '/usr/lib/build/vc'
}
# being global to this module, this dict can be accessed from outside
# it will hold the parsed configuration
config = DEFAULTS.copy()
boolean_opts = ['debug', 'do_package_tracking', 'http_debug', 'post_mortem', 'traceback', 'check_filelist', 'plaintext_passwd',
'checkout_no_colon', 'checkout_rooted', 'check_for_request_on_action', 'linkcontrol', 'show_download_progress', 'request_show_interactive',
'request_show_source_buildstatus', 'review_inherit_group', 'use_keyring', 'gnome_keyring', 'no_verify', 'builtin_signature_check',
'http_full_debug', 'include_request_from_project', 'local_service_run', 'buildlog_strip_time', 'no_preinstallimage']
api_host_options = ['user', 'pass', 'passx', 'aliases', 'http_headers', 'email', 'sslcertck', 'cafile', 'capath', 'trusted_prj']
new_conf_template = """
[general]
# URL to access API server, e.g. %(apiurl)s
# you also need a section [%(apiurl)s] with the credentials
apiurl = %(apiurl)s
# Downloaded packages are cached here. Must be writable by you.
#packagecachedir = %(packagecachedir)s
# Wrapper to call build as root (sudo, su -, ...)
#su-wrapper = %(su-wrapper)s
# rootdir to setup the chroot environment
# can contain %%(repo)s, %%(arch)s, %%(project)s, %%(package)s and %%(apihost)s (apihost is the hostname
# extracted from currently used apiurl) for replacement, e.g.
# /srv/oscbuild/%%(repo)s-%%(arch)s or
# /srv/oscbuild/%%(repo)s-%%(arch)s-%%(project)s-%%(package)s
#build-root = %(build-root)s
# compile with N jobs (default: "getconf _NPROCESSORS_ONLN")
#build-jobs = N
# build-type to use - values can be (depending on the capabilities of the 'build' script)
# empty - chroot build
# kvm - kvm VM build (needs build-device, build-swap, build-memory)
# xen - xen VM build (needs build-device, build-swap, build-memory)
# experimental:
# qemu - qemu VM build
# lxc - lxc build
#build-type =
# build-device is the disk-image file to use as root for VM builds
# e.g. /var/tmp/FILE.root
#build-device = /var/tmp/FILE.root
# build-swap is the disk-image to use as swap for VM builds
# e.g. /var/tmp/FILE.swap
#build-swap = /var/tmp/FILE.swap
# build-kernel is the boot kernel used for VM builds
#build-kernel = /boot/vmlinuz
# build-initrd is the boot initrd used for VM builds
#build-initrd = /boot/initrd
# build-memory is the amount of memory used in the VM
# value in MB - e.g. 512
#build-memory = 512
# build-vmdisk-rootsize is the size of the disk-image used as root in a VM build
# values in MB - e.g. 4096
#build-vmdisk-rootsize = 4096
# build-vmdisk-swapsize is the size of the disk-image used as swap in a VM build
# values in MB - e.g. 1024
#build-vmdisk-swapsize = 1024
# build-vmdisk-filesystem is the file system type of the disk-image used in a VM build
# values are ext3(default) ext4 xfs reiserfs btrfs
#build-vmdisk-filesystem = ext4
# Numeric uid:gid to assign to the "abuild" user in the build-root
# or "caller" to use the current users uid:gid
# This is convenient when sharing the buildroot with ordinary userids
# on the host.
# This should not be 0
# build-uid =
# strip leading build time information from the build log
# buildlog_strip_time = 1
# extra packages to install when building packages locally (osc build)
# this corresponds to osc build's -x option and can be overridden with that
# -x '' can also be given on the command line to override this setting, or
# you can have an empty setting here.
#extra-pkgs = vim gdb strace
# build platform is used if the platform argument is omitted to osc build
#build_repository = %(build_repository)s
# default project for getpac or bco
#getpac_default_project = %(getpac_default_project)s
# alternate filesystem layout: have multiple subdirs, where colons were.
#checkout_no_colon = %(checkout_no_colon)s
# change filesystem layout: avoid checkout within a project or package dir.
#checkout_rooted = %(checkout_rooted)s
# local files to ignore with status, addremove, ....
#exclude_glob = %(exclude_glob)s
# keep passwords in plaintext.
# Set to 0 to obfuscate passwords. It's no real security, just
# prevents most people from remembering your password if they watch
# you editing this file.
#plaintext_passwd = %(plaintext_passwd)s
# limit the age of requests shown with 'osc req list'.
# this is a default only, can be overridden by 'osc req list -D NNN'
# Use 0 for unlimited.
#request_list_days = %(request_list_days)s
# show info useful for debugging
#debug = 1
# show HTTP traffic useful for debugging
#http_debug = 1
# number of retries on HTTP transfer
#http_retries = 3
# Skip signature verification of packages used for build.
#no_verify = 1
# jump into the debugger in case of errors
#post_mortem = 1
# print call traces in case of errors
#traceback = 1
# use KDE/Gnome/MacOS/Windows keyring for credentials if available
#use_keyring = 1
# check for unversioned/removed files before commit
#check_filelist = 1
# check for pending requests after executing an action (e.g. checkout, update, commit)
#check_for_request_on_action = 0
# what to do with the source package if the submitrequest has been accepted. If
# nothing is specified the API default is used
#submitrequest_on_accept_action = cleanup|update|noupdate
# template for an accepted submitrequest
#submitrequest_accepted_template = Hi %%(who)s,\\n
# thanks for working on:\\t%%(tgt_project)s/%%(tgt_package)s.
# SR %%(reqid)s has been accepted.\\n\\nYour maintainers
# template for a declined submitrequest
#submitrequest_declined_template = Hi %%(who)s,\\n
# sorry your SR %%(reqid)s (request type: %%(type)s) for
# %%(tgt_project)s/%%(tgt_package)s has been declined because...
# review requests interactively (default: off)
#request_show_interactive = 1
# if a review is accepted in interactive mode and a group
# was specified the review will be accepted for this group (default: off)
#review_inherit_group = 1
[%(apiurl)s]
user = %(user)s
pass = %(pass)s
# set aliases for this apiurl
# aliases = foo, bar
# email used in .changes, unless the one from osc meta prj <user> will be used
# email =
# additional headers to pass to a request, e.g. for special authentication
#http_headers = Host: foofoobar,
# User: mumblegack
# Plain text password
#pass =
# Force using of keyring for this API
#keyring = 1
"""
account_not_configured_text = """
Your user account / password are not configured yet.
You will be asked for them below, and they will be stored in
%s for future use.
"""
config_incomplete_text = """
Your configuration file %s is not complete.
Make sure that it has a [general] section.
(You can copy&paste the below. Some commented defaults are shown.)
"""
config_missing_apiurl_text = """
the apiurl \'%s\' does not exist in the config file. Please enter
your credentials for this apiurl.
"""
cookiejar = None
def parse_apisrv_url(scheme, apisrv):
if apisrv.startswith('http://') or apisrv.startswith('https://'):
url = apisrv
elif scheme != None:
url = scheme + apisrv
else:
msg = 'invalid apiurl \'%s\' (specify the protocol (http:// or https://))' % apisrv
raise URLError(msg)
scheme, url, path = urlsplit(url)[0:3]
return scheme, url, path.rstrip('/')
def urljoin(scheme, apisrv, path=''):
return '://'.join([scheme, apisrv]) + path
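# Illustrative examples (added for clarity, not part of the original module):
#   parse_apisrv_url(None, 'https://api.opensuse.org/')  ->  ('https', 'api.opensuse.org', '')
#   urljoin('https', 'api.opensuse.org')                 ->  'https://api.opensuse.org'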
def is_known_apiurl(url):
"""returns true if url is a known apiurl"""
apiurl = urljoin(*parse_apisrv_url(None, url))
return apiurl in config['api_host_options']
def extract_known_apiurl(url):
"""
    Return the longest prefix of the given url that is a known apiurl,
    or None if there is no known apiurl that is a prefix of the given url.
"""
scheme, host, path = parse_apisrv_url(None, url)
p = path.split('/')
while p:
apiurl = urljoin(scheme, host, '/'.join(p))
if apiurl in config['api_host_options']:
return apiurl
p.pop()
return None
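# Illustrative example: assuming 'https://api.opensuse.org' is a configured apiurl,
# extract_known_apiurl('https://api.opensuse.org/public/source/foo') returns
# 'https://api.opensuse.org', while a URL on an unknown host returns None.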
def get_apiurl_api_host_options(apiurl):
"""
    Returns all apihost-specific options for the given apiurl; raises
    ConfigMissingApiurl if no credentials are configured for it.
"""
# FIXME: in A Better World (tm) there was a config object which
# knows this instead of having to extract it from a url where it
# had been mingled into before. But this works fine for now.
apiurl = urljoin(*parse_apisrv_url(None, apiurl))
if is_known_apiurl(apiurl):
return config['api_host_options'][apiurl]
raise oscerr.ConfigMissingApiurl('missing credentials for apiurl: \'%s\'' % apiurl,
'', apiurl)
def get_apiurl_usr(apiurl):
"""
returns the user for this host - if this host does not exist in the
internal api_host_options the default user is returned.
"""
# FIXME: maybe there should be defaults not just for the user but
# for all apihost specific options. The ConfigParser class
# actually even does this but for some reason we don't use it
# (yet?).
try:
return get_apiurl_api_host_options(apiurl)['user']
except KeyError:
print('no specific section found in config file for host of [\'%s\'] - using default user: \'%s\'' \
% (apiurl, config['user']), file=sys.stderr)
return config['user']
# workaround m2crypto issue:
# if multiple SSL.Context objects are created
# m2crypto only uses the last object which was created.
# So we need to build a new opener every time we switch the
# apiurl (because different apiurls may have different
# cafile/capath locations)
def _build_opener(apiurl):
from osc.core import __version__
global config
if 'last_opener' not in _build_opener.__dict__:
_build_opener.last_opener = (None, None)
if apiurl == _build_opener.last_opener[0]:
return _build_opener.last_opener[1]
# respect no_proxy env variable
if proxy_bypass(apiurl):
# initialize with empty dict
proxyhandler = ProxyHandler({})
else:
# read proxies from env
proxyhandler = ProxyHandler()
# workaround for http://bugs.python.org/issue9639
authhandler_class = HTTPBasicAuthHandler
if sys.version_info >= (2, 6, 6) and sys.version_info < (2, 7, 1) \
and not 'reset_retry_count' in dir(HTTPBasicAuthHandler):
print('warning: your urllib2 version seems to be broken. ' \
'Using a workaround for http://bugs.python.org/issue9639', file=sys.stderr)
class OscHTTPBasicAuthHandler(HTTPBasicAuthHandler):
def http_error_401(self, *args):
response = HTTPBasicAuthHandler.http_error_401(self, *args)
self.retried = 0
return response
def http_error_404(self, *args):
self.retried = 0
return None
authhandler_class = OscHTTPBasicAuthHandler
elif sys.version_info >= (2, 6, 6) and sys.version_info < (2, 7, 1):
class OscHTTPBasicAuthHandler(HTTPBasicAuthHandler):
def http_error_404(self, *args):
self.reset_retry_count()
return None
authhandler_class = OscHTTPBasicAuthHandler
elif sys.version_info >= (2, 6, 5) and sys.version_info < (2, 6, 6):
# workaround for broken urllib2 in python 2.6.5: wrong credentials
# lead to an infinite recursion
class OscHTTPBasicAuthHandler(HTTPBasicAuthHandler):
def retry_http_basic_auth(self, host, req, realm):
# don't retry if auth failed
if req.get_header(self.auth_header, None) is not None:
return None
return HTTPBasicAuthHandler.retry_http_basic_auth(self, host, req, realm)
authhandler_class = OscHTTPBasicAuthHandler
options = config['api_host_options'][apiurl]
# with None as first argument, it will always use this username/password
# combination for urls for which arg2 (apisrv) is a super-url
authhandler = authhandler_class( \
HTTPPasswordMgrWithDefaultRealm())
authhandler.add_password(None, apiurl, options['user'], options['pass'])
if options['sslcertck']:
try:
from . import oscssl
from M2Crypto import m2urllib2
except ImportError as e:
print(e)
raise NoSecureSSLError('M2Crypto is needed to access %s in a secure way.\nPlease install python-m2crypto.' % apiurl)
cafile = options.get('cafile', None)
capath = options.get('capath', None)
if not cafile and not capath:
for i in ['/etc/pki/tls/cert.pem', '/etc/ssl/certs']:
if os.path.isfile(i):
cafile = i
break
elif os.path.isdir(i):
capath = i
break
if not cafile and not capath:
raise oscerr.OscIOError(None, 'No CA certificates found')
ctx = oscssl.mySSLContext()
if ctx.load_verify_locations(capath=capath, cafile=cafile) != 1:
raise oscerr.OscIOError(None, 'No CA certificates found')
opener = m2urllib2.build_opener(ctx, oscssl.myHTTPSHandler(ssl_context=ctx, appname='osc'), HTTPCookieProcessor(cookiejar), authhandler, proxyhandler)
else:
handlers = [HTTPCookieProcessor(cookiejar), authhandler, proxyhandler]
try:
# disable ssl cert check in python >= 2.7.9
ctx = ssl._create_unverified_context()
handlers.append(HTTPSHandler(context=ctx))
except AttributeError:
pass
print("WARNING: SSL certificate checks disabled. Connection is insecure!\n", file=sys.stderr)
opener = build_opener(*handlers)
opener.addheaders = [('User-agent', 'osc/%s' % __version__)]
_build_opener.last_opener = (apiurl, opener)
return opener
def init_basicauth(config):
"""initialize urllib2 with the credentials for Basic Authentication"""
def filterhdrs(meth, ishdr, *hdrs):
# this is so ugly but httplib doesn't use
# a logger object or such
def new_method(self, *args, **kwargs):
# check if this is a recursive call (note: we do not
# have to care about thread safety)
is_rec_call = getattr(self, '_orig_stdout', None) is not None
try:
if not is_rec_call:
self._orig_stdout = sys.stdout
sys.stdout = StringIO()
meth(self, *args, **kwargs)
hdr = sys.stdout.getvalue()
finally:
# restore original stdout
if not is_rec_call:
sys.stdout = self._orig_stdout
del self._orig_stdout
for i in hdrs:
if ishdr:
hdr = re.sub(r'%s:[^\\r]*\\r\\n' % i, '', hdr)
else:
hdr = re.sub(i, '', hdr)
sys.stdout.write(hdr)
new_method.__name__ = meth.__name__
return new_method
if config['http_debug'] and not config['http_full_debug']:
HTTPConnection.send = filterhdrs(HTTPConnection.send, True, 'Cookie', 'Authorization')
HTTPResponse.begin = filterhdrs(HTTPResponse.begin, False, 'header: Set-Cookie.*\n')
if sys.version_info < (2, 6):
# HTTPS proxy is not supported in old urllib2. It only leads to an error
# or, at best, a warning.
if 'https_proxy' in os.environ:
del os.environ['https_proxy']
if 'HTTPS_PROXY' in os.environ:
del os.environ['HTTPS_PROXY']
if config['http_debug']:
# brute force
def urllib2_debug_init(self, debuglevel=0):
self._debuglevel = 1
AbstractHTTPHandler.__init__ = urllib2_debug_init
cookie_file = os.path.expanduser(config['cookiejar'])
global cookiejar
cookiejar = LWPCookieJar(cookie_file)
try:
cookiejar.load(ignore_discard=True)
except IOError:
try:
fd = os.open(cookie_file, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600)
os.close(fd)
except IOError:
            # hmm, is there any good reason why we should catch the IOError?
#print 'Unable to create cookiejar file: \'%s\'. Using RAM-based cookies.' % cookie_file
cookiejar = CookieJar()
def get_configParser(conffile=None, force_read=False):
"""
    Returns an OscConfigParser() object. After its first invocation the
ConfigParser object is stored in a method attribute and this attribute
is returned unless you pass force_read=True.
"""
conffile = conffile or os.environ.get('OSC_CONFIG', '~/.oscrc')
conffile = os.path.expanduser(conffile)
if 'conffile' not in get_configParser.__dict__:
get_configParser.conffile = conffile
if force_read or 'cp' not in get_configParser.__dict__ or conffile != get_configParser.conffile:
get_configParser.cp = OscConfigParser.OscConfigParser(DEFAULTS)
get_configParser.cp.read(conffile)
get_configParser.conffile = conffile
return get_configParser.cp
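# Usage sketch (illustrative): the parser is cached on the function object, so repeated
# calls with the same conffile return the same OscConfigParser instance; passing
# force_read=True (or a different conffile) re-reads the file from disk.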
def write_config(fname, cp):
"""write new configfile in a safe way"""
if os.path.exists(fname) and not os.path.isfile(fname):
# only write to a regular file
return
with open(fname + '.new', 'w') as f:
cp.write(f, comments=True)
try:
os.rename(fname + '.new', fname)
os.chmod(fname, 0o600)
except:
if os.path.exists(fname + '.new'):
os.unlink(fname + '.new')
raise
def config_set_option(section, opt, val=None, delete=False, update=True, **kwargs):
"""
Sets a config option. If val is not specified the current/default value is
returned. If val is specified, opt is set to val and the new value is returned.
If an option was modified get_config is called with **kwargs unless update is set
to False (override_conffile defaults to config['conffile']).
If val is not specified and delete is True then the option is removed from the
config/reset to the default value.
"""
cp = get_configParser(config['conffile'])
# don't allow "internal" options
general_opts = [i for i in DEFAULTS.keys() if not i in ['user', 'pass', 'passx']]
if section != 'general':
section = config['apiurl_aliases'].get(section, section)
scheme, host, path = \
parse_apisrv_url(config.get('scheme', 'https'), section)
section = urljoin(scheme, host, path)
sections = {}
for url in cp.sections():
if url == 'general':
sections[url] = url
else:
scheme, host, path = \
parse_apisrv_url(config.get('scheme', 'https'), url)
apiurl = urljoin(scheme, host, path)
sections[apiurl] = url
section = sections.get(section.rstrip('/'), section)
if not section in cp.sections():
raise oscerr.ConfigError('unknown section \'%s\'' % section, config['conffile'])
if section == 'general' and not opt in general_opts or \
section != 'general' and not opt in api_host_options:
raise oscerr.ConfigError('unknown config option \'%s\'' % opt, config['conffile'])
run = False
if val:
cp.set(section, opt, val)
write_config(config['conffile'], cp)
run = True
elif delete and cp.has_option(section, opt):
cp.remove_option(section, opt)
write_config(config['conffile'], cp)
run = True
if run and update:
kw = {'override_conffile': config['conffile'],
'override_no_keyring': config['use_keyring'],
'override_no_gnome_keyring': config['gnome_keyring']}
kw.update(kwargs)
get_config(**kw)
if cp.has_option(section, opt):
return (opt, cp.get(section, opt, raw=True))
return (opt, None)
def passx_decode(passx):
"""decode the obfuscated password back to plain text password"""
return bz2.decompress(base64.b64decode(passx.encode("ascii"))).decode("ascii")
def passx_encode(passwd):
"""encode plain text password to obfuscated form"""
return base64.b64encode(bz2.compress(passwd.encode('ascii'))).decode("ascii")
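# Illustrative note: the two helpers above are inverse operations, e.g.
# passx_decode(passx_encode('secret')) == 'secret'. This is reversible obfuscation
# (bz2 + base64), not encryption; see the module docstring.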
def write_initial_config(conffile, entries, custom_template=''):
"""
    write osc's initial configuration file. entries is a dict which contains values
for the config file (e.g. { 'user' : 'username', 'pass' : 'password' } ).
custom_template is an optional configuration template.
"""
conf_template = custom_template or new_conf_template
config = DEFAULTS.copy()
config.update(entries)
# at this point use_keyring and gnome_keyring are str objects
if config['use_keyring'] == '1' and GENERIC_KEYRING:
protocol, host, path = \
parse_apisrv_url(None, config['apiurl'])
keyring.set_password(host, config['user'], config['pass'])
config['pass'] = ''
config['passx'] = ''
elif config['gnome_keyring'] == '1' and GNOME_KEYRING:
protocol, host, path = \
parse_apisrv_url(None, config['apiurl'])
gnomekeyring.set_network_password_sync(
user=config['user'],
password=config['pass'],
protocol=protocol,
server=host,
object=path)
config['user'] = ''
config['pass'] = ''
config['passx'] = ''
if not config['plaintext_passwd']:
config['pass'] = ''
else:
config['passx'] = passx_encode(config['pass'])
sio = StringIO(conf_template.strip() % config)
cp = OscConfigParser.OscConfigParser(DEFAULTS)
cp.readfp(sio)
write_config(conffile, cp)
def add_section(filename, url, user, passwd):
"""
Add a section to config file for new api url.
"""
global config
cp = get_configParser(filename)
try:
cp.add_section(url)
except OscConfigParser.configparser.DuplicateSectionError:
# Section might have existed, but was empty
pass
if config['use_keyring'] and GENERIC_KEYRING:
protocol, host, path = parse_apisrv_url(None, url)
keyring.set_password(host, user, passwd)
cp.set(url, 'keyring', '1')
cp.set(url, 'user', user)
cp.remove_option(url, 'pass')
cp.remove_option(url, 'passx')
elif config['gnome_keyring'] and GNOME_KEYRING:
protocol, host, path = parse_apisrv_url(None, url)
gnomekeyring.set_network_password_sync(
user=user,
password=passwd,
protocol=protocol,
server=host,
object=path)
cp.set(url, 'keyring', '1')
cp.remove_option(url, 'pass')
cp.remove_option(url, 'passx')
else:
cp.set(url, 'user', user)
if not config['plaintext_passwd']:
cp.remove_option(url, 'pass')
cp.set(url, 'passx', passx_encode(passwd))
else:
cp.remove_option(url, 'passx')
cp.set(url, 'pass', passwd)
write_config(filename, cp)
def get_config(override_conffile=None,
override_apiurl=None,
override_debug=None,
override_http_debug=None,
override_http_full_debug=None,
override_traceback=None,
override_post_mortem=None,
override_no_keyring=None,
override_no_gnome_keyring=None,
override_verbose=None):
"""do the actual work (see module documentation)"""
global config
conffile = override_conffile or os.environ.get('OSC_CONFIG', '~/.oscrc')
conffile = os.path.expanduser(conffile)
if not os.path.exists(conffile):
raise oscerr.NoConfigfile(conffile, \
account_not_configured_text % conffile)
# okay, we made sure that .oscrc exists
# make sure it is not world readable, it may contain a password.
os.chmod(conffile, 0o600)
cp = get_configParser(conffile)
if not cp.has_section('general'):
# FIXME: it might be sufficient to just assume defaults?
msg = config_incomplete_text % conffile
msg += new_conf_template % DEFAULTS
raise oscerr.ConfigError(msg, conffile)
config = dict(cp.items('general', raw=1))
config['conffile'] = conffile
for i in boolean_opts:
try:
config[i] = cp.getboolean('general', i)
except ValueError as e:
raise oscerr.ConfigError('cannot parse \'%s\' setting: ' % i + str(e), conffile)
config['packagecachedir'] = os.path.expanduser(config['packagecachedir'])
config['exclude_glob'] = config['exclude_glob'].split()
re_clist = re.compile('[, ]+')
config['extra-pkgs'] = [i.strip() for i in re_clist.split(config['extra-pkgs'].strip()) if i]
# collect the usernames, passwords and additional options for each api host
api_host_options = {}
# Regexp to split extra http headers into a dictionary
    # the text to be matched essentially looks like this:
    # "Attribute1: value1, Attribute2: value2, ..."
    # there may be arbitrary leading and intervening whitespace.
# the following regexp does _not_ support quoted commas within the value.
http_header_regexp = re.compile(r"\s*(.*?)\s*:\s*(.*?)\s*(?:,\s*|\Z)")
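    # For example (illustrative): "Host: api.suse.de, User: faye" is split by findall()
    # into [('Host', 'api.suse.de'), ('User', 'faye')].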
# override values which we were called with
# This needs to be done before processing API sections as it might be already used there
if override_no_keyring:
config['use_keyring'] = False
if override_no_gnome_keyring:
config['gnome_keyring'] = False
aliases = {}
for url in [x for x in cp.sections() if x != 'general']:
        # backward compatibility
scheme, host, path = parse_apisrv_url(config.get('scheme', 'https'), url)
apiurl = urljoin(scheme, host, path)
user = None
password = None
if config['use_keyring'] and GENERIC_KEYRING:
try:
# Read from keyring lib if available
user = cp.get(url, 'user', raw=True)
password = str(keyring.get_password(host, user))
except:
# Fallback to file based auth.
pass
elif config['gnome_keyring'] and GNOME_KEYRING:
# Read from gnome keyring if available
try:
gk_data = gnomekeyring.find_network_password_sync(protocol=scheme, server=host, object=path)
if not 'user' in gk_data[0]:
raise oscerr.ConfigError('no user found in keyring', conffile)
user = gk_data[0]['user']
if 'password' in gk_data[0]:
password = str(gk_data[0]['password'])
else:
# this is most likely an error
print('warning: no password found in keyring', file=sys.stderr)
except gnomekeyring.NoMatchError:
# Fallback to file based auth.
pass
        if user is not None and len(user) == 0:
            user = None
            print('Warning: blank user in the keyring for the ' \
                'apiurl %s.\nPlease fix your keyring entry.' % apiurl, file=sys.stderr)
if user is not None and password is None:
err = ('no password defined for "%s".\nPlease fix your keyring '
'entry or gnome-keyring setup.\nAssuming an empty password.'
% url)
print(err, file=sys.stderr)
password = ''
# Read credentials from config
if user is None:
#FIXME: this could actually be the ideal spot to take defaults
#from the general section.
user = cp.get(url, 'user', raw=True) # need to set raw to prevent '%' expansion
password = cp.get(url, 'pass', raw=True) # especially on password!
try:
passwordx = passx_decode(cp.get(url, 'passx', raw=True)) # especially on password!
except:
passwordx = ''
            if password is None or password == 'your_password':
password = ''
if user is None or user == '':
raise oscerr.ConfigError('user is blank for %s, please delete or complete the "user=" entry in %s.' % (apiurl, config['conffile']), config['conffile'])
if config['plaintext_passwd'] and passwordx or not config['plaintext_passwd'] and password:
if config['plaintext_passwd']:
if password != passwordx:
print('%s: rewriting from encoded pass to plain pass' % url, file=sys.stderr)
add_section(conffile, url, user, passwordx)
password = passwordx
else:
if password != passwordx:
print('%s: rewriting from plain pass to encoded pass' % url, file=sys.stderr)
add_section(conffile, url, user, password)
if not config['plaintext_passwd']:
password = passwordx
if cp.has_option(url, 'http_headers'):
http_headers = cp.get(url, 'http_headers')
http_headers = http_header_regexp.findall(http_headers)
else:
http_headers = []
if cp.has_option(url, 'aliases'):
for i in cp.get(url, 'aliases').split(','):
key = i.strip()
if key == '':
continue
if key in aliases:
msg = 'duplicate alias entry: \'%s\' is already used for another apiurl' % key
raise oscerr.ConfigError(msg, conffile)
aliases[key] = url
api_host_options[apiurl] = {'user': user,
'pass': password,
'http_headers': http_headers}
optional = ('email', 'sslcertck', 'cafile', 'capath')
for key in optional:
if cp.has_option(url, key):
if key == 'sslcertck':
api_host_options[apiurl][key] = cp.getboolean(url, key)
else:
api_host_options[apiurl][key] = cp.get(url, key)
if cp.has_option(url, 'build-root', proper=True):
api_host_options[apiurl]['build-root'] = cp.get(url, 'build-root', raw=True)
if not 'sslcertck' in api_host_options[apiurl]:
api_host_options[apiurl]['sslcertck'] = True
if scheme == 'http':
api_host_options[apiurl]['sslcertck'] = False
if cp.has_option(url, 'trusted_prj'):
api_host_options[apiurl]['trusted_prj'] = cp.get(url, 'trusted_prj').split(' ')
else:
api_host_options[apiurl]['trusted_prj'] = []
# add the auth data we collected to the config dict
config['api_host_options'] = api_host_options
config['apiurl_aliases'] = aliases
apiurl = aliases.get(config['apiurl'], config['apiurl'])
config['apiurl'] = urljoin(*parse_apisrv_url(None, apiurl))
# backward compatibility
if 'apisrv' in config:
        # strip the URL scheme prefix, if any
        apisrv = re.sub(r'^https?://', '', config['apisrv'])
scheme = config.get('scheme', 'https')
config['apiurl'] = urljoin(scheme, apisrv)
    if 'apisrv' in config or 'scheme' in config:
print('Warning: Use of the \'scheme\' or \'apisrv\' in ~/.oscrc is deprecated!\n' \
'Warning: See README for migration details.', file=sys.stderr)
if 'build_platform' in config:
print('Warning: Use of \'build_platform\' config option is deprecated! (use \'build_repository\' instead)', file=sys.stderr)
config['build_repository'] = config['build_platform']
config['verbose'] = int(config['verbose'])
# override values which we were called with
if override_verbose:
config['verbose'] = override_verbose + 1
if override_debug:
config['debug'] = override_debug
if override_http_debug:
config['http_debug'] = override_http_debug
if override_http_full_debug:
config['http_debug'] = override_http_full_debug or config['http_debug']
config['http_full_debug'] = override_http_full_debug
if override_traceback:
config['traceback'] = override_traceback
if override_post_mortem:
config['post_mortem'] = override_post_mortem
if override_apiurl:
apiurl = aliases.get(override_apiurl, override_apiurl)
# check if apiurl is a valid url
config['apiurl'] = urljoin(*parse_apisrv_url(None, apiurl))
# XXX unless config['user'] goes away (and is replaced with a handy function, or
# config becomes an object, even better), set the global 'user' here as well,
# provided that there _are_ credentials for the chosen apiurl:
try:
config['user'] = get_apiurl_usr(config['apiurl'])
except oscerr.ConfigMissingApiurl as e:
e.msg = config_missing_apiurl_text % config['apiurl']
e.file = conffile
raise e
    # finally, initialize urllib2 to use the credentials for Basic Authentication
init_basicauth(config)
# vim: sw=4 et
| hramrach/osc | osc/conf.py | Python | gpl-2.0 | 40,792 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import json
import sqlite3
import subprocess
from import_abstr import ImportAbstraction
CORPUS_WRITER = 'utils/mallet/CorpusWriter.jar'
class ImportCorpus( ImportAbstraction ):
def __init__( self, app_name, app_model = 'corpus', app_desc = 'Corpus Metadata and Statistics' ):
ImportAbstraction.__init__( self, app_name, app_model, app_desc )
def ImportMeta( self, filename ):
print 'Importing metadata...'
header, meta = self.ExtractDocMeta( filename )
self.SaveMetaToDisk( meta, header )
self.SaveMetaToDB( meta, header )
def ExtractDocMeta( self, filename ):
print 'Reading document metadata: {}'.format( filename )
try:
with open( filename, 'r' ) as f:
header = None
meta = {}
for index, line in enumerate( f ):
values = line[:-1].decode( 'utf-8' ).split( '\t' )
if header is None:
header = values
else:
record = {}
for n, value in enumerate( values ):
if n < len(header):
key = header[n]
else:
key = 'Field{:d}'.format( n+1 )
record[ key ] = value
key = record['DocID']
meta[ key ] = record
return sorted(header), meta
except:
return None, None
def SaveMetaToDisk( self, meta, header ):
print 'Writing data to disk: {}'.format( self.data_path )
if meta is not None and header is not None:
filename = '{}/doc-meta.json'.format( self.data_path )
with open( filename, 'w' ) as f:
data = { "header" : { h : ( "string" if h != 'Year' else 'integer' ) for h in header }, "data" : meta }
json.dump( data, f, encoding = 'utf-8', indent = 2, sort_keys = True )
def SaveMetaToDB( self, meta, header ):
def CreateTable():
columnDefs = [ [ f ] for f in header ]
for i, columnDef in enumerate(columnDefs):
column = header[i]
if column.lower() == 'year':
columnDef.append( 'INTEGER' )
else:
columnDef.append( 'STRING' )
if column.lower() == 'docid':
columnDef.append( 'UNIQUE' )
columnDef.append( 'NOT NULL' )
columnDefs = ', '.join( [ ' '.join(d) for d in columnDefs ] )
sql = """CREATE TABLE IF NOT EXISTS {TABLE} ( Key INTEGER PRIMARY KEY AUTOINCREMENT, {COLUMN_DEFS} );""".format( TABLE = table, COLUMN_DEFS = columnDefs )
conn.execute( sql )
def InsertData():
columns = ', '.join( header )
values = ', '.join( [ '?' for f in header ] )
sql = """INSERT OR IGNORE INTO {TABLE} ( {COLUMNS} ) VALUES( {VALUES} )""".format( TABLE = table, COLUMNS = columns, VALUES = values )
data = []
for d in meta.itervalues():
data.append( [ d[f] for f in header ] )
conn.executemany( sql, data )
print 'Writing data to database: {}'.format( self.database_path )
if meta is not None and header is not None:
table = 'DocMeta'
filename = '{}/doc-meta.sqlite'.format( self.database_path )
conn = sqlite3.connect( filename )
CreateTable()
InsertData()
conn.commit()
conn.close()
def ImportTerms( self, filename, minFreq = 5, minDocFreq = 2, maxCount = 1000 ):
print 'Computing term frequencies and co-occurrences...'
corpus = self.ExtractCorpusTerms( filename )
termFreqs, termDocFreqs = self.ComputeTermFreqs( corpus )
termCoFreqs, termCoFreqOptions = self.ComputeTermCoFreqs( corpus, termFreqs, termDocFreqs, minFreq, minDocFreq, maxCount )
self.SaveTermsToDisk( termFreqs, termDocFreqs, termCoFreqs, termCoFreqOptions )
def ExtractCorpusTerms( self, filename ):
print 'Reading mallet corpus: {}'.format( filename )
command = [ "java", "-jar", CORPUS_WRITER, filename ]
print ' '.join(command)
process = subprocess.Popen( command, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
( out, err ) = process.communicate()
print err
corpus = {}
for document in out.splitlines():
docID, docTokens = document.split( '\t' )
corpus[ docID ] = docTokens.split( ' ' )
return corpus
def ComputeTermFreqs( self, corpus ):
print 'Computing term freqs...'
termFreqs = {}
termDocFreqs = {}
for docID, docTokens in corpus.iteritems():
for token in docTokens:
if token not in termFreqs:
termFreqs[ token ] = 1
else:
termFreqs[ token ] += 1
uniqueTokens = frozenset( docTokens )
for token in uniqueTokens:
if token not in termDocFreqs:
termDocFreqs[ token ] = 1
else:
termDocFreqs[ token ] += 1
return termFreqs, termDocFreqs
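	# Illustrative sketch: for a toy corpus {'d1': ['a', 'b', 'a'], 'd2': ['a']} the loops
	# above yield termFreqs == {'a': 3, 'b': 1} and termDocFreqs == {'a': 2, 'b': 1}
	# (document frequency counts each document at most once per term).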
def ComputeTermCoFreqs( self, corpus, termFreqs, termDocFreqs, minFreq, minDocFreq, maxCount ):
def getTokenPairs( firstToken, secondToken ):
if firstToken < secondToken:
return firstToken, secondToken
else:
return secondToken, firstToken
print 'Computing term co-occurrences...'
keys = set()
for term in termFreqs:
if termFreqs[term] >= minFreq:
if termDocFreqs[term] >= minDocFreq:
keys.add(term)
keys = sorted( keys, key = lambda x : -termFreqs[x] )
keys = keys[:maxCount]
keySet = frozenset(keys)
termCoFreqs = {}
for docID, docTokens in corpus.iteritems():
n = len(docTokens)
for i in range(n):
firstToken = docTokens[i]
if firstToken in keySet:
for j in range(i+1,n):
secondToken = docTokens[j]
if secondToken in keySet:
a, b = getTokenPairs( firstToken, secondToken )
if a not in termCoFreqs:
								termCoFreqs[a] = { b : 1 }
elif b not in termCoFreqs[a]:
termCoFreqs[a][b] = 1
else:
termCoFreqs[a][b] += 1
options = {
'minFreq' : minFreq,
'minDocFreq' : minDocFreq,
'maxCount' : maxCount,
'keys' : keys
}
return termCoFreqs, options
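	# Illustrative sketch: only terms passing minFreq/minDocFreq are considered, the top
	# maxCount of them (by frequency) form keySet, and the result is a nested mapping keyed
	# by the lexicographically smaller term of each pair, e.g. termCoFreqs['cat']['dog'] == 3
	# means the pair co-occurred three times across all documents.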
def SaveTermsToDisk( self, termFreqs, termDocFreqs, termCoFreqs, termCoFreqOptions ):
print 'Writing data to disk: {}'.format( self.data_path )
filename = '{}/term-freqs.json'.format( self.data_path )
with open( filename, 'w' ) as f:
json.dump( termFreqs, f, encoding = 'utf-8', indent = 2, sort_keys = True )
filename = '{}/term-doc-freqs.json'.format( self.data_path )
with open( filename, 'w' ) as f:
json.dump( termDocFreqs, f, encoding = 'utf-8', indent = 2, sort_keys = True )
filename = '{}/term-co-freqs.json'.format( self.data_path )
with open( filename, 'w' ) as f:
json.dump( termCoFreqs, f, encoding = 'utf-8', indent = 2, sort_keys = True )
filename = '{}/term-co-freqs-options.json'.format( self.data_path )
with open( filename, 'w' ) as f:
json.dump( termCoFreqOptions, f, encoding = 'utf-8', indent = 2, sort_keys = True )
def main():
	parser = argparse.ArgumentParser( description = 'Import a MALLET corpus as a web2py application.' )
parser.add_argument( 'app_name', type = str, help = 'Web2py application identifier' )
parser.add_argument( '--meta' , type = str, default = None, help = 'Import document metadata from a tab-delimited file' )
parser.add_argument( '--terms' , type = str, default = None, help = 'Calculate term freqs and co-occurrences from a corpus.mallet file' )
args = parser.parse_args()
importer = ImportCorpus( app_name = args.app_name )
if args.meta is not None:
importer.ImportMeta( args.meta )
if args.terms is not None:
importer.ImportTerms( args.terms )
importer.AddToWeb2py()
if __name__ == '__main__':
main()
| jyt109/termite-data-server | bin/import_corpus.py | Python | bsd-3-clause | 7,285 |
import random
from xworld_task import XWorldTask
from py_util import overrides
class XWorldDialog(XWorldTask):
def __init__(self, env):
super(XWorldDialog, self).__init__(env)
self.max_steps = 7 # maximum number of steps, should be related to number of sel classes
self.speak_correct_reward = 1
self.speak_incorrect_reward = -1
self.question_ask_reward = 0.1
self.nothing_said_reward = -1
self.reset_dialog_setting()
        ## some config parameters
self.stepwise_reward = True
self.success_reward = 1
self.failure_reward = -0.1
self.step_penalty = -0.01
self.sentence_level_task = True # False: word level task
def reset_dialog_setting(self):
        self.question_ratio = 0.5 # the probability of asking a question (vs. making a statement)
self.teacher_sent_prev_ = [] # stores teacher's sentences in a session in order
self.behavior_flags = []
def idle(self):
"""
Start a task
"""
agent, _, _ = self._get_agent()
goals = self._get_goals()
assert len(goals) > 0, "there is no goal on the map!"
sel_goal = random.choice(goals)
## first generate all candidate answers
self._bind("S -> statement")
self._set_production_rule("G -> " + " ".join(["'" + sel_goal.name + "'"]))
self.answers = self._generate_all()
## then generate the question
self._bind("S -> question")
self.questions = self._generate_all()
sent = self.sentence_selection_with_ratio()
self._set_production_rule("R -> " + " ".join(["'" + sent + "'"]))
teacher_sent = self._generate_and_save([sent])
return ["reward", 0.0, teacher_sent]
def reward(self):
"""
Giving reward to the agent
"""
def get_reward(reward, success=None):
"""
            Internal function for computing the reward based on the stepwise_reward flag.
reward is the current step reward
success: None: not an ending step, True: success, False: failure
"""
if self.stepwise_reward:
return reward
elif success is None:
# only step_penalty for intermediate steps in non-stepwise rewarding case
return self.step_penalty
elif success is True: #final stage
return self.success_reward
elif success is False:
return self.failure_reward
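        # Note (illustrative): with stepwise_reward False, intermediate steps collapse to
        # step_penalty and only the final step returns success_reward or failure_reward.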
# get agent's sentence (response to previous sentence from teacher)
_, agent_sent, _ = self._get_agent()
# get teacher's sentence
prev_sent = self._get_last_sent()
# if the previous stage is a qa stage
qa_stage_prev = (prev_sent == "" or prev_sent in self.questions)
is_question_asked = agent_sent in self.questions
is_reply_correct = agent_sent in self.answers
is_nothing_said = agent_sent == ""
# extend_step is for provding answer by teacher
extend_step = (is_nothing_said or is_question_asked) and \
qa_stage_prev
# in this case, move to the next object for interaction
if not extend_step:
self.env.within_session_reinstantiation()
goals = self._get_goals()
sel_goal = random.choice(goals)
# update answers
self._bind("S -> statement") # first bind S to statement
#self._bind("G -> '%s'" % sel_goal.name)
self._set_production_rule("G -> " + " ".join(["'" + sel_goal.name + "'"]))
self.answers = self._generate_all()
self.steps_in_cur_task += 1
# decide reward and next stage
if self.steps_in_cur_task + 1 < self.max_steps:
if self.steps_in_cur_task > self.max_steps / 2:
self.question_ratio = 1
if qa_stage_prev:
if is_question_asked:
# reward feedback
if not is_nothing_said:
reward = self.question_ask_reward
else:
reward = self.nothing_said_reward
self.behavior_flags += [False]
# sentence feedback (answer/statement)
self._bind("S -> statement")
#self._bind("G -> '%s'" % sel_goal.name)
self._set_production_rule("G -> " + " ".join(["'" + sel_goal.name + "'"]))
teacher_sent = self._generate_and_save()
elif is_reply_correct:
self.behavior_flags += [True]
reward = self.speak_correct_reward
reward = get_reward(reward, all(self.behavior_flags))
teacher_sent = ""
return ["conversation_wrapup", reward, teacher_sent]
else:
self.behavior_flags += [False]
reward = self.speak_incorrect_reward
sent = self.sentence_selection_with_ratio()
self._set_production_rule("R -> " + " ".join(["'" + sent + "'"]))
teacher_sent = self._generate_and_save([sent])
else:
# reward feedback for different cases
if is_reply_correct: # repeat statement
reward = 0
elif is_nothing_said:
reward = self.nothing_said_reward
elif is_question_asked:
reward = self.speak_incorrect_reward
else:
self.behavior_flags += [False]
reward = self.speak_incorrect_reward
# sentence feedback
sent = self.sentence_selection_with_ratio()
self._set_production_rule("R -> " + " ".join(["'" + sent + "'"]))
teacher_sent = self._generate_and_save([sent])
reward = get_reward(reward)
return ["reward", reward, teacher_sent]
else:
if qa_stage_prev and is_reply_correct:
self.behavior_flags += [True]
                reward = self.speak_correct_reward
else:
self.behavior_flags += [False]
reward = self.speak_incorrect_reward
teacher_sent = ""
reward = get_reward(reward, all(self.behavior_flags))
return ["conversation_wrapup", reward, teacher_sent]
@overrides(XWorldTask)
def conversation_wrapup(self):
"""
        This dummy stage simply adds an additional time step after the
        conversation is over, which enables the agent to learn a language model
        from the teacher's last sentence.
"""
if all(self.behavior_flags):
self._record_success()
self._record_event("correct_reply", next=True)
else:
self._record_failure()
self._record_event("wrong_reply", next=True)
self._record_event(self.prev_event)
self.prev_event = None
self.reset_dialog_setting()
return ["idle", 0, ""]
def get_stage_names(self):
"""
return all the stage names; does not have to be in order
"""
return ["idle", "reward", "conversation_wrapup"]
def _define_grammar(self):
if False:
return self.get_sentence_level_grammar()
else:
return self.get_word_level_grammar()
def get_sentence_level_grammar(self):
grammar_str = """
S --> question | statement
question -> E | Q
statement-> A1 | A2 | A3 | A4 | A5 | A6 | A7 | A8
E -> ''
Q -> Q1 | Q2 | Q3
Q1 -> 'what'
Q2 -> 'what' M
Q3 -> 'tell' 'what' N
M -> 'is' 'it' | 'is' 'this' | 'is' 'there' | 'do' 'you' 'see' | 'can' 'you' 'see' | 'do' 'you' 'observe' | 'can' 'you' 'observe'
N -> 'it' 'is' | 'this' 'is' | 'there' 'is' | 'you' 'see' | 'you' 'can' 'see' | 'you' 'observe' | 'you' 'can' 'observe'
A1 -> G
A2 -> 'it' 'is' G
A3 -> 'this' 'is' G
A4 -> 'there' 'is' G
A5 -> 'i' 'see' G
A6 -> 'i' 'observe' G
A7 -> 'i' 'can' 'see' G
A8 -> 'i' 'can' 'observe' G
G -> 'dummy'
"""
return grammar_str, "S"
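    # Illustrative examples of sentences this grammar can produce once G is bound to a
    # goal name such as 'apple': questions like "what", "what is it" or "tell what it is",
    # and statements like "it is apple" or "i can see apple".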
def get_word_level_grammar(self):
grammar_str = """
S --> question | statement
question -> E | Q
statement-> G
E -> ''
Q -> 'what'
G -> 'dummy'
"""
return grammar_str, "S"
def sentence_selection_with_ratio(self):
if random.uniform(0,1) > self.question_ratio: # proceed with statement
return random.choice(self.answers)
else:
return random.choice(self.questions)
def _generate_and_save(self, teacher_sent = []):
"""
generate (if teacher_sent is empty) and save the teacher's sentence
to teacher's previous sentence pool
"""
if not teacher_sent:
teacher_sent = [self._generate()]
self.teacher_sent_prev_ = self.teacher_sent_prev_ + teacher_sent
return teacher_sent[0]
def _get_last_sent(self):
"""
get the sentence from teacher in the last time step
"""
assert self.teacher_sent_prev_, "make sure the previous sentence set is non-empty"
sent = self.teacher_sent_prev_[-1]
return sent
| skylian/XWorld | games/xworld/tasks/XWorldDialog.py | Python | apache-2.0 | 9,465 |
import datetime
from unittest import mock
from ddt import data, ddt
from freezegun import freeze_time
from rest_framework import status, test
from waldur_core.structure.models import CustomerRole
from waldur_core.structure.tests import factories as structure_factories
from waldur_core.structure.tests import fixtures
from waldur_mastermind.marketplace import models
from waldur_mastermind.marketplace.tasks import process_order
from waldur_mastermind.marketplace.tests import factories
from waldur_mastermind.marketplace.tests.factories import OFFERING_OPTIONS
from waldur_mastermind.marketplace.tests.helpers import override_marketplace_settings
from waldur_mastermind.marketplace_support import PLUGIN_NAME
@ddt
class OrderGetTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
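    # Note: ddt's @data decorator below expands each decorated test into one test case
    # per listed argument, so the assertions run once for every role.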
@data('staff', 'owner', 'admin', 'manager')
def test_orders_should_be_visible_to_colleagues_and_staff(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_list_url()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 1)
@data('user')
def test_orders_should_be_invisible_to_other_users(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_list_url()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()), 0)
def test_items_should_be_invisible_to_unauthenticated_users(self):
url = factories.OrderFactory.get_list_url()
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@ddt
class OrderCreateTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
@data('staff', 'owner', 'admin', 'manager')
def test_user_can_create_order_in_valid_project(self, user):
user = getattr(self.fixture, user)
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
@data('user')
def test_user_can_not_create_order_in_invalid_project(self, user):
user = getattr(self.fixture, user)
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_not_create_item_if_offering_is_not_available(self):
offering = factories.OfferingFactory(state=models.Offering.States.ARCHIVED)
response = self.create_order(self.fixture.staff, offering)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_order_with_plan(self):
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
@mock.patch('waldur_mastermind.marketplace.tasks.notify_order_approvers.delay')
def test_notification_is_sent_when_order_is_created(self, mock_task):
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, shared=True, billable=True
)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.manager, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mock_task.assert_called_once()
def test_can_not_create_order_if_offering_is_not_available_to_customer(self):
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, shared=False
)
offering.customer.add_user(self.fixture.owner, CustomerRole.OWNER)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.owner, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_not_create_order_with_plan_related_to_another_offering(self):
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_not_create_order_if_plan_max_amount_has_been_reached(self):
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering, max_amount=3)
factories.ResourceFactory.create_batch(3, plan=plan, offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_create_order_with_valid_attributes_specified_by_options(self):
attributes = {
'storage': 1000,
'ram': 30,
'cpu_count': 5,
}
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, options=OFFERING_OPTIONS
)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': attributes,
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['items'][0]['attributes'], attributes)
def test_user_can_not_create_order_with_invalid_attributes(self):
attributes = {
'storage': 'invalid value',
}
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, options=OFFERING_OPTIONS
)
plan = factories.PlanFactory(offering=offering)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'attributes': attributes,
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_create_order_with_valid_limits(self):
limits = {
'storage': 1000,
'ram': 30,
'cpu_count': 5,
}
offering = factories.OfferingFactory(
state=models.Offering.States.ACTIVE, type=PLUGIN_NAME
)
plan = factories.PlanFactory(offering=offering)
for key in limits.keys():
models.OfferingComponent.objects.create(
offering=offering,
type=key,
billing_type=models.OfferingComponent.BillingTypes.LIMIT,
)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'limits': limits,
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
order_item = models.OrderItem.objects.last()
self.assertEqual(order_item.limits['cpu_count'], 5)
def test_user_can_not_create_order_with_invalid_limits(self):
limits = {
'storage': 1000,
'ram': 30,
'cpu_count': 5,
}
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
plan = factories.PlanFactory(offering=offering)
for key in limits.keys():
models.OfferingComponent.objects.create(
offering=offering,
type=key,
billing_type=models.OfferingComponent.BillingTypes.FIXED,
)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'plan': factories.PlanFactory.get_url(plan),
'limits': limits,
'attributes': {},
},
]
}
response = self.create_order(
self.fixture.staff, offering, add_payload=add_payload
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_order_creating_is_not_available_for_blocked_organization(self):
user = self.fixture.owner
self.fixture.customer.blocked = True
self.fixture.customer.save()
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_create_order_if_terms_of_service_have_been_accepted(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
offering.terms_of_service = 'Terms of service'
offering.save()
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
'accepting_terms_of_service': True,
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def test_user_can_create_order_if_terms_of_service_are_not_filled(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def test_user_can_create_order_if_offering_is_not_shared(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
offering.shared = False
offering.customer = self.project.customer
offering.save()
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def test_user_cannot_create_order_if_terms_of_service_have_been_not_accepted(self):
user = self.fixture.admin
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
offering.terms_of_service = 'Terms of service'
offering.save()
add_payload = {
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
]
}
response = self.create_order(user, offering=offering, add_payload=add_payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(
str(response.content, 'utf-8'),
'{"items":["Terms of service for offering \'%s\' have not been accepted."]}'
% offering,
)
self.assertFalse(models.Order.objects.filter(created_by=user).exists())
def test_user_cannot_create_order_in_project_is_expired(self):
user = getattr(self.fixture, 'staff')
self.project.end_date = datetime.datetime(day=1, month=1, year=2020)
self.project.save()
with freeze_time('2020-01-01'):
response = self.create_order(user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_if_divisions_do_not_match_order_validation_fails(self):
user = self.fixture.staff
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
division = structure_factories.DivisionFactory()
offering.divisions.add(division)
response = self.create_order(user, offering)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_if_divisions_match_order_validation_passes(self):
user = self.fixture.staff
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
division = structure_factories.DivisionFactory()
offering.divisions.add(division)
self.fixture.customer.division = division
self.fixture.customer.save()
response = self.create_order(user, offering)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(models.Order.objects.filter(created_by=user).exists())
self.assertEqual(1, len(response.data['items']))
def create_order(self, user, offering=None, add_payload=None):
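        # Helper: POST a minimal single-item order for the given offering;
        # callers can merge extra fields into the payload via add_payload.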
if offering is None:
offering = factories.OfferingFactory(state=models.Offering.States.ACTIVE)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_list_url()
payload = {
'project': structure_factories.ProjectFactory.get_url(self.project),
'items': [
{
'offering': factories.OfferingFactory.get_url(offering),
'attributes': {},
},
],
}
if add_payload:
payload.update(add_payload)
return self.client.post(url, payload)
@ddt
class OrderApproveTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
self.url = factories.OrderFactory.get_url(self.order, 'approve')
def test_owner_can_approve_order(self):
self.ensure_user_can_approve_order(self.fixture.owner)
def test_by_default_manager_can_not_approve_order(self):
self.ensure_user_can_not_approve_order(self.fixture.manager)
def test_by_default_admin_can_not_approve_order(self):
self.ensure_user_can_not_approve_order(self.fixture.admin)
@override_marketplace_settings(MANAGER_CAN_APPROVE_ORDER=True)
def test_manager_can_approve_order_if_feature_is_enabled(self):
self.ensure_user_can_approve_order(self.fixture.manager)
@override_marketplace_settings(ADMIN_CAN_APPROVE_ORDER=True)
def test_admin_can_approve_order_if_feature_is_enabled(self):
self.ensure_user_can_approve_order(self.fixture.admin)
def test_user_can_not_reapprove_active_order(self):
self.order.state = models.Order.States.EXECUTING
self.order.save()
response = self.approve_order(self.fixture.owner)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
self.assertEqual(self.order.approved_by, None)
def test_order_approving_is_not_available_for_blocked_organization(self):
self.order.project.customer.blocked = True
self.order.project.customer.save()
response = self.approve_order(self.fixture.owner)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch('waldur_mastermind.marketplace.tasks.process_order.delay')
def test_when_order_with_basic_offering_is_approved_resource_is_marked_as_ok(
self, mocked_delay
):
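        # Run the order-processing task synchronously instead of dispatching it to Celery.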
mocked_delay.side_effect = process_order
offering = factories.OfferingFactory(
customer=self.fixture.customer, type='Marketplace.Basic'
)
order_item = factories.OrderItemFactory(offering=offering, order=self.order)
self.approve_order(self.fixture.owner)
order_item.refresh_from_db()
self.assertEqual(order_item.resource.state, models.Resource.States.OK)
def test_user_cannot_approve_order_if_project_is_expired(self):
self.project.end_date = datetime.datetime(year=2020, month=1, day=1).date()
self.project.save()
with freeze_time('2020-01-01'):
response = self.approve_order(self.fixture.staff)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def approve_order(self, user):
self.client.force_authenticate(user)
response = self.client.post(self.url)
self.order.refresh_from_db()
return response
def ensure_user_can_approve_order(self, user):
response = self.approve_order(user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.order.approved_by, user)
def ensure_user_can_not_approve_order(self, user):
response = self.approve_order(user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(self.order.approved_by, None)
@ddt
class OrderRejectTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
self.order_item_1 = factories.OrderItemFactory(order=self.order)
self.order_item_2 = factories.OrderItemFactory(order=self.order)
self.url = factories.OrderFactory.get_url(self.order, 'reject')
@data('staff', 'manager', 'admin', 'owner')
def test_authorized_user_can_reject_order(self, user):
self.client.force_authenticate(getattr(self.fixture, user))
response = self.client.post(self.url)
for obj in [self.order, self.order_item_1, self.order_item_2]:
obj.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(self.order.state, models.Order.States.REJECTED)
self.assertEqual(self.order_item_1.state, models.OrderItem.States.TERMINATED)
self.assertEqual(self.order_item_2.state, models.OrderItem.States.TERMINATED)
def test_support_users_can_not_reject_order(self):
self.client.force_authenticate(self.fixture.global_support)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_not_reject_unrequested_order(self):
self.client.force_authenticate(self.fixture.staff)
self.order.approve()
self.order.save()
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_order_rejecting_is_not_available_for_blocked_organization(self):
self.order.project.customer.blocked = True
self.order.project.customer.save()
self.client.force_authenticate(self.fixture.manager)
response = self.client.post(self.url)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@ddt
class OrderDeleteTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.ProjectFixture()
self.project = self.fixture.project
self.manager = self.fixture.manager
self.order = factories.OrderFactory(
project=self.project, created_by=self.manager
)
@data('staff', 'owner')
def test_owner_and_staff_can_delete_order(self, user):
response = self.delete_order(user)
self.assertEqual(
response.status_code, status.HTTP_204_NO_CONTENT, response.data
)
self.assertFalse(models.Order.objects.filter(created_by=self.manager).exists())
@data('admin', 'manager')
def test_other_colleagues_can_not_delete_order(self, user):
response = self.delete_order(user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertTrue(models.Order.objects.filter(created_by=self.manager).exists())
@data('user')
def test_other_user_can_not_delete_order(self, user):
response = self.delete_order(user)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertTrue(models.Order.objects.filter(created_by=self.manager).exists())
def test_order_deleting_is_not_available_for_blocked_organization(self):
self.fixture.customer.blocked = True
self.fixture.customer.save()
response = self.delete_order('owner')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def delete_order(self, user):
user = getattr(self.fixture, user)
self.client.force_authenticate(user)
url = factories.OrderFactory.get_url(self.order)
response = self.client.delete(url)
return response
class OrderStateTest(test.APITransactionTestCase):
def test_switch_order_state_to_done_when_all_order_items_are_processed(self):
order_item = factories.OrderItemFactory(state=models.OrderItem.States.EXECUTING)
order = order_item.order
order.state = models.Order.States.EXECUTING
order.save()
order_item.state = models.OrderItem.States.DONE
order_item.save()
order.refresh_from_db()
self.assertEqual(order.state, models.Order.States.DONE)
def test_not_switch_order_state_to_done_when_not_all_order_items_are_processed(
self,
):
order_item = factories.OrderItemFactory(state=models.OrderItem.States.EXECUTING)
order = order_item.order
factories.OrderItemFactory(state=models.OrderItem.States.EXECUTING, order=order)
order.state = models.Order.States.EXECUTING
order.save()
order_item.state = models.OrderItem.States.DONE
order_item.save()
order.refresh_from_db()
self.assertEqual(order.state, models.Order.States.EXECUTING)
| opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/marketplace/tests/test_orders.py | Python | mit | 25,173 |
#
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import operator
import numpy as np
import math
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
imag, sqrt, where, mgrid, sin, place, issubdtype,
extract, less, inexact, nan, zeros, sinc)
from . import _ufuncs as ufuncs
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,
psi, _zeta, hankel1, hankel2, yv, kv, ndtri,
poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
__all__ = ['ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros',
'bessel_diff_formula', 'bi_zeros', 'clpmn', 'comb',
'digamma', 'diric', 'ellipk', 'erf_zeros', 'erfcinv',
'erfinv', 'euler', 'factorial', 'factorialk', 'factorial2',
'fresnel_zeros', 'fresnelc_zeros', 'fresnels_zeros',
'gamma', 'h1vp', 'h2vp', 'hankel1', 'hankel2', 'hyp0f1',
'iv', 'ivp', 'jn_zeros', 'jnjnp_zeros', 'jnp_zeros',
'jnyn_zeros', 'jv', 'jvp', 'kei_zeros', 'keip_zeros',
'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv', 'kvp',
'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef',
'ndtri', 'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq',
'perm', 'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn',
'riccati_yn', 'sinc', 'y0_zeros', 'y1_zeros', 'y1p_zeros',
'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta']
def _nonneg_int_or_fail(n, var_name, strict=True):
try:
if strict:
# Raises an exception if float
n = operator.index(n)
elif n == floor(n):
n = int(n)
else:
raise ValueError()
if n < 0:
raise ValueError()
except (ValueError, TypeError) as err:
raise err.__class__("{} must be a non-negative integer".format(var_name))
return n
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x, n) = sin(x * n/2) / (n * sin(x / 2)),
where `n` is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8, 8));
>>> for idx, n in enumerate([2, 3, 4, 9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
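    # Broadcast n and x against each other; (x-x) and (n-n) are zero arrays of the right shape.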
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
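    # Split the evaluation into three cases: invalid n -> nan,
    # sin(x/2) close to zero -> limiting value +/-1, otherwise the defining formula.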
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def jnjnp_zeros(nt):
"""Compute zeros of integer-order Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
return jnyn_zeros(n, nt)[0]
def jnp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
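    # Implements L^(n)(v, z) = 2**(-n) * sum_{k=0}^{n} phase**k * C(n, k) * L(v-n+2k, z),
    # i.e. DLMF 10.6.7 (phase = -1) and DLMF 10.29.5 (phase = +1).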
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
        p = phase * (p * (n-i+1)) / i  # p = phase**i * binom(n, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.6.E7
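    Examples
    --------
    An illustrative check of the identity J0'(z) = -J1(z):
    >>> from scipy.special import jvp, jv
    >>> np.allclose(jvp(0, 1.5), -jv(1, 1.5))
    True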
"""
n = _nonneg_int_or_fail(n, 'n')
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.6.E7
"""
n = _nonneg_int_or_fail(n, 'n')
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of real-order modified Bessel function Kv(z)
Kv(z) is the modified Bessel function of the second kind.
Derivative is calculated with respect to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int
Order of derivative. Default is first derivative.
Returns
-------
out : ndarray
The results
Examples
--------
Calculate multiple values at order 5:
>>> from scipy.special import kvp
>>> kvp(5, (1, 2, 3+5j))
array([-1.84903536e+03+0.j , -2.57735387e+01+0.j ,
-3.06627741e-02+0.08750845j])
Calculate for a single value at multiple orders:
>>> kvp((4, 4.5, 5), 1)
array([ -184.0309, -568.9585, -1849.0354])
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.29.E5
"""
n = _nonneg_int_or_fail(n, 'n')
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect
to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.29.E5
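    Examples
    --------
    An illustrative check of the identity I0'(z) = I1(z):
    >>> from scipy.special import ivp, iv
    >>> np.allclose(ivp(0, 1.5), iv(1, 1.5))
    True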
"""
n = _nonneg_int_or_fail(n, 'n')
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.6.E7
"""
n = _nonneg_int_or_fail(n, 'n')
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.6.E7
"""
n = _nonneg_int_or_fail(n, 'n')
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
def riccati_jn(n, x):
r"""Compute Ricatti-Bessel function of the first kind and its derivative.
    The Riccati-Bessel function of the first kind is defined as :math:`x
j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
kind of order :math:`n`.
This function computes the value and first derivative of the
    Riccati-Bessel function for all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
Notes
-----
The computation is carried out via backward recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = _nonneg_int_or_fail(n, 'n', strict=False)
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and its derivative.
    The Riccati-Bessel function of the second kind is defined as :math:`x
y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
kind of order :math:`n`.
This function computes the value and first derivative of the function for
all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
Notes
-----
The computation is carried out via ascending recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions.
https://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = _nonneg_int_or_fail(n, 'n', strict=False)
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
"""Compute the generalized (associated) Laguerre polynomial of degree n and order k.
The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**k`` with ``k > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Sequence of associated Legendre functions of the first kind.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
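        # Negative order: evaluate at |m| below and rescale with the connection
        # factor ``fixarr`` (see the DLMF references in each branch).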
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind for complex arguments.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
    if type not in (2, 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
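        # Negative order: evaluate at |m| below and rescale with the connection factor ``fixarr``.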
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Sequence of associated Legendre functions of the second kind.
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
    # Ensure neither m nor n is 0; the results are sliced back to the requested size below.
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
def euler(n):
"""Euler numbers E(0), E(1), ..., E(n).
The Euler numbers [1]_ are also known as the secant numbers.
Because ``euler(n)`` returns floating point values, it does not give
exact values for large `n`. The first inexact value is E(22).
Parameters
----------
n : int
The highest index of the Euler number to be returned.
Returns
-------
ndarray
The Euler numbers [E(0), E(1), ..., E(n)].
The odd Euler numbers, which are all zero, are included.
References
----------
.. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences,
https://oeis.org/A122045
.. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
Examples
--------
>>> from scipy.special import euler
>>> euler(6)
array([ 1., 0., -1., 0., 5., 0., -61.])
>>> euler(13).astype(np.int64)
array([ 1, 0, -1, 0, 5, 0, -61,
0, 1385, 0, -50521, 0, 2702765, 0])
>>> euler(22)[-1] # Exact value of E(22) is -69348874393137901.
-69348874393137976.0
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre function of the first kind.
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
n = _nonneg_int_or_fail(n, 'n', strict=False)
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
"""Legendre function of the second kind.
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
n = _nonneg_int_or_fail(n, 'n', strict=False)
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
the corresponding values Ai(a');
and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First `nt` zeros of Ai(x)
ap : ndarray
First `nt` zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first `nt` zeros, b, of the Airy function Bi(x);
first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
the corresponding values Bi(b');
and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First `nt` zeros of Bi(x)
bp : ndarray
First `nt` zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
r"""Jahnke-Emden Lambda function, Lambdav(x).
This function is defined as [2]_,
.. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
where :math:`\Gamma` is the gamma function and :math:`J_v` is the
Bessel function of the first kind.
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
.. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
Curves" (4th ed.), Dover, 1945
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
r"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
The parameterization in terms of :math:`m` follows that of section
17.2 in [1]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
return ellipkm1(1 - asarray(m))
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, float, ndarray
The total number of combinations.
See Also
--------
binom : Binomial coefficient ufunc
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
    120
>>> comb(10, 3, exact=True, repetition=True)
    220
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
return _comb_int(N, k)
else:
k, N = asarray(k), asarray(N)
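        # Combinations are defined to be 0 whenever k > N, N < 0 or k < 0.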
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
# https://stackoverflow.com/a/16327037
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
def factorial(n, exact=False):
"""
The factorial of a number or array of numbers.
The factorial of non-negative integer `n` is the product of all
positive integers less than or equal to `n`::
n! = n * (n - 1) * (n - 2) * ... * 1
Parameters
----------
n : int or array_like of ints
Input values. If ``n < 0``, the return value is 0.
exact : bool, optional
If True, calculate the answer exactly using long integer arithmetic.
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
Returns
-------
nf : float or int or ndarray
Factorial of `n`, as integer or float depending on `exact`.
Notes
-----
For arrays with ``exact=True``, the factorial is computed only once, for
the largest input, with each other result computed in the process.
The output dtype is increased to ``int64`` or ``object`` if necessary.
With ``exact=False`` the factorial is approximated using the gamma
function:
.. math:: n! = \\Gamma(n+1)
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3, 4, 5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(arr, exact=True)
array([ 6, 24, 120])
>>> factorial(5, exact=True)
120L
"""
if exact:
if np.ndim(n) == 0:
return 0 if n < 0 else math.factorial(n)
else:
n = asarray(n)
un = np.unique(n).astype(object)
# Convert to object array of long ints if np.int can't handle size
if un[-1] > 20:
dt = object
elif un[-1] > 12:
dt = np.int64
else:
dt = np.int
out = np.empty_like(n, dtype=dt)
# Handle invalid/trivial values
un = un[un > 1]
out[n < 2] = 1
out[n < 0] = 0
# Calculate products of each range of numbers
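            # Worked example (informal): for n = [3, 5, 7], un is [3, 5, 7];
            # val starts at 3! = 6, then 6 * (4*5) = 120 (= 5!), then
            # 120 * (6*7) = 5040 (= 7!), and each value is written into `out`.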
if un.size:
val = math.factorial(un[0])
out[n == un[0]] = val
for i in xrange(len(un) - 1):
prev = un[i] + 1
current = un[i + 1]
val *= _range_prod(prev, current)
out[n == current] = val
return out
else:
n = asarray(n)
vals = gamma(n + 1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
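        # e.g. n = 7 (odd): gamma(4.5) * 2**4 / sqrt(pi)
        #      = 6.5625 * sqrt(pi) * 16 / sqrt(pi) = 105.0 = 7!!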
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
        Raised when `exact` is False.
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
def zeta(x, q=None, out=None):
r"""
Riemann or Hurwitz zeta function.
Parameters
----------
x : array_like of float
Input data, must be real
q : array_like of float, optional
Input data, must be real. Defaults to Riemann zeta.
out : ndarray, optional
Output array for the computed values.
Returns
-------
out : array_like
Values of zeta(x).
Notes
-----
The two-argument version is the Hurwitz zeta function:
.. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x},
Riemann zeta function corresponds to ``q = 1``.
See Also
--------
zetac
Examples
--------
>>> from scipy.special import zeta, polygamma, factorial
Some specific values:
>>> zeta(2), np.pi**2/6
(1.6449340668482266, 1.6449340668482264)
>>> zeta(4), np.pi**4/90
(1.0823232337111381, 1.082323233711138)
Relation to the `polygamma` function:
>>> m = 3
>>> x = 1.25
>>> polygamma(m, x)
array(2.782144009188397)
>>> (-1)**(m+1) * factorial(m) * zeta(m+1, x)
2.7821440091883969
"""
if q is None:
q = 1
return _zeta(x, q, out)
| gfyoung/scipy | scipy/special/basic.py | Python | bsd-3-clause | 64,497 |
#!/usr/bin/python
"""chicks' answer to Euler Project problem #56"""
import math
max_digit_sum = 0
max_factor = 100
def calc_digit_sum(n):
digits = list(str(n))
sum = 0
for digit in digits:
sum += int(digit)
#print str(n) + " -> " + str(sum)
return sum
for a in xrange(1, max_factor):
for b in xrange(1, max_factor):
digit_sum = calc_digit_sum( a ** b )
if digit_sum > max_digit_sum:
max_digit_sum = digit_sum
print "ANSWER:" + str(max_digit_sum)
| chicks-net/euler-answers-chicks | problem056/answer056.py | Python | gpl-2.0 | 470 |
# The MIT License (MIT)
#
# Copyright (c) 2013 Kyle Heath
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#from boto.ec2 import connection as ec2_connection
from boto.s3 import connection as s3_connection
from cirruscluster import core
from passlib import hash
import pkg_resources
from cirruscluster.cluster import ec2cluster
import logging
import os
import re
import requests
import stat
import time
import urllib2
class MaprClusterConfig(object):
"""
MapR Service Configuration
"""
def __init__(self):
super(MaprClusterConfig, self).__init__()
self.name = None
self.mapr_ami_owner_id = None
self.instance_type = None
self.region = None
self.authorized_client_cidrs = None
self.mapr_version = None
# list of the placement zones within region that should be used to
# evenly distribute the nodes (ex ['a','b', 'e'])
self.zones = None
return
class MaprCluster(object):
"""
MapR Service
"""
def __init__(self, config):
self.__CheckConfigOrDie(config)
self.config = config
self.cluster = ec2cluster.Ec2Cluster(config.cluster_name,
config.region_name)
self.ec2 = self.cluster.ec2
self.s3 = s3_connection.S3Connection()
self.hadoop_conf_dir = '/opt/mapr/hadoop/hadoop-0.20.2/conf/'
# fetch or create the ssh key used for all cluster nodes
self.cluster_keypair_name = 'cirrus_cluster'
src_region = config.region_name
dst_regions = core.tested_region_names
aws_id = None
aws_secret = None
self.ssh_key = core.InitKeypair(aws_id, aws_secret, self.ec2, self.s3,
self.cluster_keypair_name, src_region,
dst_regions)
# make sure we have a local copy of private ssh key so we can connect
# easily from command line
cluster_ssh_key_path = os.path.expanduser('~/keys/%s.pem' % \
self.cluster_keypair_name )
if not os.path.exists(os.path.dirname(cluster_ssh_key_path)):
os.makedirs(os.path.dirname(cluster_ssh_key_path))
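    # The private key must be readable/writable by the owner only (0600),
    # otherwise ssh refuses to use it.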
mode = stat.S_IRUSR | stat.S_IWUSR
with os.fdopen(os.open(cluster_ssh_key_path, os.O_WRONLY | os.O_CREAT |
os.O_TRUNC, mode), 'w') as handle:
handle.write(self.ssh_key)
return
def _GetAvailabilityZoneNameByIndex(self, index):
assert(index < len(self.config.zones))
availability_zone_name = '%s%s' % (self.config.region_name,
self.config.zones[index])
return availability_zone_name
def Create(self, num_instances):
if not self.__StartMaster():
return
self.Resize(num_instances)
self.PushConfig()
return True
def Resize(self, num_instances):
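    # The cldb host list includes the master node, hence the -1 to count only
    # the workers.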
num_spotworkers = len(self.__GetIpsFromCldb()) - 1
# must be done before add workers because adding workers uses the nfs mount
self.ConfigureClient()
if num_spotworkers < num_instances:
num_to_add = num_instances - num_spotworkers
logging.info( 'num_to_add: %d' % (num_to_add))
self.__AddWorkers(num_to_add)
elif num_spotworkers > num_instances:
raise RuntimeError('Shrinking the cluster is not yet supported.')
self.__FixCachePermissions()
return
def Destroy(self):
instances = []
instances.extend(self.cluster.get_instances_in_role("master", "running"))
instances.extend(self.cluster.get_instances_in_role("spotworker",
"running"))
self.cluster.terminate_instances(instances)
return True
def PushConfig(self):
instances = self.__GetCldbRegisteredWorkerInstances()
num_cores = self.GetNumCoresPerWorker()
num_map_slots = num_cores
num_reduce_slots = num_cores - 1
master = self.__GetMasterInstance()
assert(self.__ConfigureMapredSite([master], num_map_slots,
num_reduce_slots))
assert(self.__RestartTaskTrackers(instances))
return
def Reset(self):
self.__CleanUpRootPartition()
#master_instances = self.__GetMasterInstance()
#self.__RunCommandOnInstances('sudo killall -9 maprcli', [master_instances])
instances = self.__GetCldbRegisteredWorkerInstances()
#self.__RunCommandOnInstances('sudo service mapr-warden restart', instances)
assert(self.__RestartTaskTrackers(instances))
self.__RestartJobTracker()
return
def ShowUiUrls(self):
master = self.__GetMasterInstance()
if not master:
print 'No cluster is running...'
return
master_url = master.private_hostname
mapr_ui_url = 'https://%s:8443' % (master_url)
ganglia_ui_url = 'http://%s/ganglia' % (master_url)
cluster_cpu_url = 'http://%s/ganglia/graph.php?g=load_report&z=large&'\
'c=cirrus&m=Cluster%%20Memory%%20Used%%20MB&r=hour&'\
's=descending&hc=4&mc=2&st=1362178130' % (master_url)
cluster_ram_url = 'http://%s/ganglia/graph.php?g=mem_report&z=large&'\
'c=cirrus&m=Cluster%%20Memory%%20Used%%20MB&r=hour&'\
's=descending&hc=4&mc=2&st=1362178130' % (master_url)
cluster_net_url = 'http://%s/ganglia/graph.php?g=network_report&z=large&'\
'c=cirrus&m=network_report&r=hour&s=descending&hc=4&'\
'mc=2&st=1362180395' % (master_url)
print 'mapr_ui: %s' % (mapr_ui_url)
print 'ganglia_ui_url: %s' % (ganglia_ui_url)
print 'cluster_cpu_url: %s' % (cluster_cpu_url)
print 'cluster_ram_url: %s' % (cluster_ram_url)
print 'cluster_net_url: %s' % (cluster_net_url)
return
def GetNumCoresPerWorker(self):
hosts = self.__InstancesToHostnames(self.__GetWorkerInstances())
assert(len(hosts) >= 1)
# construct the ansible runner and execute on all hosts
num_cores_list = core.GetNumCoresOnHosts(hosts, self.ssh_key)
min_num_cores = min(num_cores_list)
max_num_cores = max(num_cores_list)
if min_num_cores != max_num_cores:
raise RuntimeError('Expected all workers to have same number of '
'cores: %s' % (num_cores_list))
num_cores = max_num_cores
return num_cores
def GetProperty(self, property_name):
if property_name == 'slot_summary':
params = {}
params['columns'] = 'service,ttmapSlots,ttReduceSlots'
params['filter'] = '[service==tasktracker]'
r = self.__MaprApi('node list', params)
if not r['status'] == 'OK':
raise RuntimeError('Failed to get property: %s' % r)
slot_summary = {}
prefetch_maptasks = 1.0 #mapreduce.tasktracker.prefetch.maptasks
for item in r['data']:
host = item['ip']
        # mapr doesn't report the number of map slots directly; it reports
        # num_map_slots + num_map_slots * mapreduce.tasktracker.prefetch.maptasks
        mapr_reported_map_slots = long(item['ttmapSlots'])
        map_slots = long(mapr_reported_map_slots / (1.0 + prefetch_maptasks))
        reduce_slots = long(item['ttReduceSlots'])
        slot_summary[host] = {}
        slot_summary[host]['map_slots'] = map_slots
        slot_summary[host]['reduce_slots'] = reduce_slots
return slot_summary
if property_name == 'cores_summary':
params = {}
params['columns'] = 'cpus'
#params['filter'] = '[service==tasktracker]'
r = self.__MaprApi('node list', params)
assert(r['status'] == 'OK')
cores_summary = []
prefetch_maptasks = 1.0 #mapreduce.tasktracker.prefetch.maptasks
assert('data' in r)
for item in r['data']:
host = item['ip']
cpus = long(item['cpus'])
cores_summary.append(cpus)
return cores_summary
if property_name == 'ram_summary':
params = {}
params['columns'] = 'mtotal'
params['filter'] = '[service==tasktracker]'
r = self.__MaprApi('node list', params)
if not r['status'] == 'OK':
raise RuntimeError()
ram_summary = []
prefetch_maptasks = 1.0 #mapreduce.tasktracker.prefetch.maptasks
for item in r['data']:
host = item['ip']
# mapr docs say ram is in GB but it seems to actually be in MB
ram_megabytes = long(item['mtotal'])
ram_gigabytes = int(ram_megabytes * 0.000976562)
ram_summary.append(ram_gigabytes)
return ram_summary
if property_name == 'rack_topology':
rack_topology, _ = self.__GetWorkNodeTopology()
return rack_topology
else:
raise RuntimeError('unknown property requested: %s' % (property_name))
return None
def SetNumMapReduceSlotsPerNode(self, num_map_slots, num_reduce_slots):
# check if current configuration matches requested configuration
slot_summary_data = self.GetProperty('slot_summary')
current_settings_correct = True
for host, host_data in slot_summary_data.iteritems():
assert('map_slots' in host_data)
cur_map_slots_per_node = host_data['map_slots']
if cur_map_slots_per_node != num_map_slots:
print 'cur_map_slots_per_node: %d' % cur_map_slots_per_node
print 'num_map_slots: %d' % num_map_slots
current_settings_correct = False
break
for host, host_data in slot_summary_data.iteritems():
assert('reduce_slots' in host_data)
cur_reduce_slots_per_node = host_data['reduce_slots']
if cur_reduce_slots_per_node != num_reduce_slots:
print 'cur_reduce_slots_per_node: %d' % cur_reduce_slots_per_node
print 'num_reduce_slots: %d' % num_reduce_slots
current_settings_correct = False
break
if current_settings_correct:
return True
else:
print 'current slot config is not correct... need to reconfigure...'
self.__ConfigureMapredSite(self.__GetAllInstances(), num_map_slots,
num_reduce_slots )
master = self.__GetMasterInstance()
self.__RestartTaskTrackers(self.__GetWorkerInstances())
assert(self.__RestartJobTracker())
# todo wait for job tracker
master = self.__GetMasterInstance()
job_tracker_url = 'http://%s:50030' % (master.private_ip)
self.__WaitForUrlReady(job_tracker_url)
return True
def __ConfigureMapredSite(self, instances, num_map_slots, num_reduce_slots):
if not instances:
return
map_slots_param = '%d' % (num_map_slots)
reduce_slots_param = '%d' % (num_reduce_slots)
extra_vars = {'map_slots_param': map_slots_param,
'reduce_slots_param': reduce_slots_param,
'hadoop_conf_dir' : self.hadoop_conf_dir}
path = 'playbooks/mapred-site.yml'
playbook = pkg_resources.resource_filename(__name__, path)
return core.RunPlaybookOnHosts(playbook,
self.__InstancesToHostnames(instances),
self.ssh_key, extra_vars)
def ConfigureLazyWorkers(self):
""" Lazy workers are instances that are running and reachable but failed to
    register with the cldb to join the mapr cluster. This tries to find these
missing workers and add them to the cluster. """
lazy_worker_instances = self.__GetMissingWorkers()
if not lazy_worker_instances:
return
reachable_states = self.__AreInstancesReachable(lazy_worker_instances)
reachable_instances = [t[0] for t in zip(lazy_worker_instances,
reachable_states) if t[1]]
print 'reachable_instances: %s' % reachable_instances
self.__ConfigureWorkers(reachable_instances)
return
def TerminateUnreachableInstances(self):
workers = self.__GetWorkerInstances()
unreachable_instances = core.GetUnreachableInstances(workers, self.ssh_key)
print 'unreachable_instances: '
print unreachable_instances
self.cluster.terminate_instances(unreachable_instances)
return
def Debug(self):
#self.__CleanUpRootPartition()
#self.__FixCachePermissions()
#self.__ConfigureClient()
#self.__ConfigureGanglia()
#self.__FixCachePermissions()
#self.ConfigureLazyWorkers()
#self.__CleanUpCoresAlarm()
#self.__InstallTbb(self.__GetWorkerInstances())
#self.TerminateUnreachableInstances()
#self.__ConfigureMaster()
#self.__ConfigureGanglia()
#self.__ConfigureMaster()
self.__ConfigureWorkers(self.__GetWorkerInstances())
#print self.GetNumCoresPerWorker()
#self.ConfigureClient()
#self.__SetWorkerTopology()
#self.__EnableNfsServer()
return
###############################################################################
## Private Methods
###############################################################################
def __CheckConfigOrDie(self, config):
assert(config.cluster_name)
assert(config.cluster_instance_type)
assert(config.region_name)
if not config.mapr_version:
raise RuntimeError('Config missing mapr_version: (e.g. v2.1.3) '
'see http://package.mapr.com/releases/ ')
assert(len(config.zones) >= 1) # at least one zone must be specified
tested_instance_types = ['cc1.4xlarge', 'cc2.8xlarge', 'c1.xlarge']
if not config.cluster_instance_type in tested_instance_types:
raise RuntimeError('this instance type has not been tested: %s' %
(config.cluster_instance_type))
if config.cluster_instance_type == 'cr1.8xlarge':
raise RuntimeError('Currently not supported because mapr start_node '
'perl script can not handle the fact that swap is '
'bigger than ssd disk not to mention the required '
'cache size.')
valid_zones = ['a', 'b', 'c', 'd', 'e']
for zone in config.zones:
assert(zone in valid_zones)
return
def __EnableNfsServer(self):
master = self.__GetMasterInstance()
params = {}
params['nfs'] = 'start'
params['nodes'] = '%s' % (master.private_ip)
result = self.__MaprApi('node/services', params)
return result
def __StartMaster(self):
""" Starts a master node, configures it, and starts services. """
num_masters = len(self.cluster.get_instances_in_role("master", "running"))
assert(num_masters < 1)
logging.info( "waiting for masters to start")
if self.config.master_on_spot_instances:
self.__LaunchSpotMasterInstances()
else:
self.__LaunchOnDemandMasterInstances()
time.sleep(1)
self.__ConfigureMaster()
return True
def ConfigureClient(self):
logging.info( 'ConfigureClient')
unmount_nfs_cmd = 'sudo umount -l /mapr'
logging.info( 'unmounting nfs')
core.ExecuteCmd(unmount_nfs_cmd)
# try to start the nfs server and make sure it starts OK...
# if it doesn't ask the user to apply license and retry until success
logging.info( 'Enabling NFS server...')
while True:
result = self.__EnableNfsServer()
if not result['status'] == 'OK':
logging.info( 'Please use web ui to apply M3 license.')
time.sleep(5)
else:
break
# tell local client to point at the master
master_instance = self.__GetMasterInstance()
assert(master_instance)
cmd = 'sudo rm -rf /opt/mapr/conf/mapr-clusters.conf;'\
'sudo /opt/mapr/server/configure.sh -N %s -c -C %s:7222' % \
(self.config.cluster_name, master_instance.private_ip)
core.ExecuteCmd(cmd)
# if needed, create /mapr as root of all nfs mount points
if not os.path.exists('/mapr'):
core.ExecuteCmd('sudo mkdir /mapr')
core.ExecuteCmd('sudo chmod 777 /mapr')
mount_nfs_cmd = 'sudo mount -o nolock %s:/mapr /mapr' % \
(master_instance.private_ip)
perm_nfs_cmd = 'sudo chmod -R 777 /mapr/%s' % (self.config.cluster_name)
logging.info( 'mounting nfs')
core.ExecuteCmd(mount_nfs_cmd)
logging.info( 'setting nfs permissions')
core.ExecuteCmd(perm_nfs_cmd)
return
def __SetupMasterTopology(self):
master_instance = self.__GetMasterInstance()
ip_to_id = self.__IpsToServerIds()
assert(master_instance.private_ip in ip_to_id)
master_id = ip_to_id[master_instance.private_ip]
# move master node topology to /cldb
assert(master_instance)
cmd = 'node move -serverids %s -topology /cldbonly' % master_id
retval, response = self.__RunMaprCli(cmd)
# create data volume
cmd = 'volume create -name data -path /data -type 0 -mount 1'
retval, response = self.__RunMaprCli(cmd)
return
def __ConfigureMaster(self):
master_instance = self.__GetMasterInstance()
assert(master_instance)
self.__WaitForInstancesReachable([master_instance])
self.__SetupAccessControl()
root_password_hash = hash.sha256_crypt.encrypt("mapr")
extra_vars = {'cluster_name': self.config.cluster_name,
'master_ip': master_instance.private_ip,
'root_password_hash': root_password_hash,
'is_master' : True}
path = 'playbooks/master.yml'
playbook = pkg_resources.resource_filename(__name__, path)
core.RunPlaybookOnHost(playbook, master_instance.private_ip,
self.ssh_key, extra_vars)
self.__WaitForMasterReady()
self.__SetupMasterTopology()
# instruct the user to log in to web ui and install license and start
# nfs service
web_ui_url = self.__GetWebUiUrl()
print ''
print ''
print ''
print ''
print 'Your master node is ready...'
print ''
print ''
print ''
print ''
print '****************** Please Install Free License ******************'
print '1. Log in to MapR Web UI...'
print ' url: %s' % (web_ui_url)
print ' username: root'
print ' password: mapr'
print ' NOTE: You can safely ignore any web browser SSL error warnings.'
print ' '
print '2. Click "Add License via Web" button and follow on-screen'
print ' instructions to add a free M3 license.'
print ' '
print '3. Return to this console and press any key to continue...'
print ''
# wait for manual steps before continuing
raw_input('After installing M3 license..\n'
'PRESS ANY KEY to launch worker nodes.')
return
def __GetWebUiUrl(self):
master_instance = self.__GetMasterInstance()
web_ui_url = 'https://%s:8443' % (master_instance.public_hostname)
return web_ui_url
def __IsWebUiReady(self):
#TODO rewrite to use __IsUrlLive
web_ui_ready = False
web_ui_url = self.__GetWebUiUrl()
try:
print 'testing: %s' % (web_ui_url)
core.UrlGet(web_ui_url)
web_ui_ready = True
except urllib2.URLError as e:
print e
return web_ui_ready
def __IsUrlLive(self, url):
ready = False
try:
print 'testing: %s' % (url)
core.UrlGet(url)
ready = True
except:
print '.'
#logging.info( 'error checking if url is live: %s' % (url))
return ready
def __WaitForUrlReady(self, url):
print 'waiting for url to be ready...'
ready = self.__IsUrlLive(url)
while not ready:
time.sleep(5)
ready = self.__IsUrlLive(url)
return
def __WaitForMasterReady(self):
    print 'waiting for web ui to be ready...'
    count = 0
    while True:
      if self.__IsWebUiReady():
        break
      count += 1
      if count > 12:
        raise RuntimeError('web service probably did not start...')
      time.sleep(1)
return
def __GetSecurityGroup(self, name):
return self.ec2.get_all_security_groups(groupnames=[name])[0]
def __SetupAccessControl(self):
# modify security group to allow this machine
# (i.e. those in workstation group) to ssh to cluster nodes
client_group = self.__GetSecurityGroup(core.workstation_security_group)
cluster_group = self.__GetSecurityGroup(self.config.cluster_name)
    # make sure this group is not already authorized, or the next command fails
cluster_group.revoke(src_group=client_group)
cluster_group.authorize(src_group=client_group)
return
def __AddWorkers(self, num_to_add):
""" Adds workers evenly across all enabled zones."""
# Check preconditions
assert(self.__IsWebUiReady())
zone_to_ips = self.__GetZoneToWorkerIpsTable()
zone_old_new = []
for zone, ips in zone_to_ips.iteritems():
num_nodes_in_zone = len(ips)
num_nodes_to_add = 0
zone_old_new.append((zone, num_nodes_in_zone, num_nodes_to_add))
print 'num_to_add %s' % num_to_add
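    # Greedy balancing: give each new node to the zone that currently has the
    # fewest existing + planned nodes.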
for _ in range(num_to_add):
zone_old_new.sort(key= lambda z : z[1]+z[2])
zt = zone_old_new[0]
zone_old_new[0] = (zt[0], zt[1], zt[2]+1)
#print zone_old_new
zone_plan = [(zt[2], zt[0]) for zt in zone_old_new]
print 'resize plan'
if self.config.workers_on_spot_instances:
new_worker_instances = self.__LaunchSpotWorkerInstances(zone_plan)
else:
new_worker_instances = self.__LaunchOnDemandWorkerInstances(zone_plan)
self.__WaitForInstancesReachable(new_worker_instances)
self.__ConfigureWorkers(new_worker_instances)
return
# def __TerminateUnreachableInstances(self, instances):
# unreachable_instances = core.GetUnreachableInstances(instances,
# self.ssh_key)
# print 'unreachable_instances: %s' % (unreachable_instances)
# self.cluster.ec2.terminate_instances(instances)
# return
def __GetIpsFromCldb(self):
""" Gets ip of workers that are live in cldb """
ip_to_id = self.__IpsToServerIds()
return ip_to_id.keys()
def __GetMissingWorkers(self):
cldb_ip_set = set(self.__GetIpsFromCldb())
instances = self.__GetWorkerInstances()
missing_workers = []
for instance in instances:
if instance.private_ip not in cldb_ip_set:
logging.info('we have a worker that is running but not in cldb: %s' % \
(instance))
missing_workers.append(instance)
return missing_workers
def __GetZoneForWorker(self, worker_ip):
cldb_ip_set = set(self.__GetIpsFromCldb())
group = self.cluster._group_name_for_role('spotworker')
raw_instances = self.cluster._get_instances(group, 'running')
# group workers by zone
workers_zone = None
for raw_instance in raw_instances:
zone_name = raw_instance.placement
ip = raw_instance.private_ip_address
if ip == worker_ip:
workers_zone = zone_name
break
return workers_zone
def __GetZoneToWorkerIpsTable(self):
cldb_ip_set = set(self.__GetIpsFromCldb())
group = self.cluster._group_name_for_role('spotworker')
raw_instances = self.cluster._get_instances(group, 'running')
zone_to_ip = {}
for i, zone in enumerate(self.config.zones):
zone_name = self._GetAvailabilityZoneNameByIndex(i)
zone_to_ip[zone_name] = []
# group workers by zone
for raw_instance in raw_instances:
zone_name = raw_instance.placement
ip = raw_instance.private_ip_address
if ip not in cldb_ip_set:
logging.info( 'we have a worker not in cldb: %s' % (ip))
continue
if zone_name not in zone_to_ip:
raise RuntimeError('unexpected condition')
#zone_to_ip[zone] = []
zone_to_ip[zone_name].append(ip)
return zone_to_ip
def __GetWorkNodeTopology(self):
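    """ Reads the current node topology from the cldb.
    Returns a (rack_to_nodes, workers_outside_racks) pair: a dict mapping each
    /data/<zone>/rackNNN path to the worker ips under it, and a list of worker
    ips that are not yet assigned to any rack.
    """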
params = {'columns' : 'racktopo' }
result = self.__MaprApi('node/list', params)
valid_cldb_topology_re = re.compile(r"^/cldbonly/.+$")
valid_rack_topology_re = re.compile(r"^(/data/us-.*/rack[0-9]{3})/.+$")
rack_to_nodes = {}
workers_outside_racks = []
for d in result['data']:
ip = d['ip']
node_topo = d['racktopo']
#print ip, node_topo
match = valid_rack_topology_re.match(node_topo)
if match:
rack = match.groups()[0]
if not rack in rack_to_nodes:
rack_to_nodes[rack] = []
rack_to_nodes[rack].append(ip)
else:
if not valid_cldb_topology_re.match(node_topo):
workers_outside_racks.append(ip)
return rack_to_nodes, workers_outside_racks
def __ParseRackTopology(self, input_topology):
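    """ Splits a rack topology path into (zone, rack id).
    For example '/data/us-east-1b/rack002' parses to ('us-east-1b', 2);
    returns (None, None) if the path doesn't match the expected layout.
    """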
rack_topology_parse_re = re.compile(r"^/data/(us-.*)/rack([0-9]{3})$")
match = rack_topology_parse_re.match(input_topology)
rack_zone = None
rack_id = None
if match:
rack_zone = match.groups()[0]
rack_id = int(match.groups()[1])
return rack_zone, rack_id
  def __ChangeNodeTopology(self, server_id, new_topology):
    params = {'serverids' : server_id,
              'topology' : new_topology}
result = self.__MaprApi('node/move', params)
if result['status'] != 'OK':
raise RuntimeError('Failed to change node topology')
return
#@core.RetryUntilReturnsTrue(tries=10)
def __CreateCacheVolume(self, new_rack_toplogy):
logging.info( '__CreateCacheVolume()')
# set the desired topology for the new volume
params = {'values' : '{"cldb.default.volume.topology":"%s"}' % \
new_rack_toplogy }
result = self.__MaprApi('config/save', params)
if result['status'] != 'OK':
logging.info( 'error')
return False
rack_zone, rack_id = self.__ParseRackTopology(new_rack_toplogy)
assert(rack_zone != None)
volume_name = 'data_deluge_cache_%s_rack%03d' % (rack_zone, rack_id)
mount_path_parent = '/data/deluge/cache/%s' % (rack_zone)
mount_path = '%s/rack%03d' % (mount_path_parent, rack_id)
parent_path = '/mapr/%s/%s' % (self.config.cluster_name, mount_path_parent)
parent_path = parent_path.encode('ascii','ignore') # remove unicode
if not os.path.exists(parent_path):
logging.info( 'creating deluge cache dir: %s' % (parent_path))
os.makedirs(parent_path)
logging.info( 'Deluge cache dir OK: %s' % (parent_path))
# create the new cache volume
params = {'name' : volume_name,
'type' : 0,
'path' : mount_path,
'mount' : 1,
'replication' : 6,
'replicationtype' : 'low_latency',
}
logging.info( 'about to create volume: %s' % (params))
result = self.__MaprApi('volume/create', params)
if result['status'] != 'OK':
raise RuntimeError(result)
logging.info( 'about to configure client')
self.ConfigureClient() # nfs must be active before next commands can work
time.sleep(10)
# set permisions
logging.info( 'about to fix cache permissions...')
perm_nfs_cmd = 'sudo chmod -R 777 /mapr/%s/%s' % (self.config.cluster_name,
mount_path)
core.ExecuteCmd(perm_nfs_cmd)
# reset to the default topology /inactive
params = {'values' : '{"cldb.default.volume.topology":"/inactive"}' }
result = self.__MaprApi('config/save', params)
if result['status'] != 'OK':
logging.info( 'error')
return False
return True
def __SetWorkerTopology(self):
ip_to_id = self.__IpsToServerIds()
# get current state of the cluster topology
rack_to_nodes, workers_outside_racks = self.__GetWorkNodeTopology()
# verify all current racks have at most 6 nodes
for rack, rack_nodes in rack_to_nodes.iteritems():
assert(len(rack_nodes) <= 6)
# check if there are any workers that need to be placed into a rack
new_racks = []
for worker_ip in workers_outside_racks:
logging.info( 'trying to place worker in a rack: %s' % worker_ip)
worker_zone = self.__GetZoneForWorker(worker_ip)
# check if there are any racks in that zone that have fewer than 6 nodes
available_rack = None
for rack, rack_nodes in rack_to_nodes.iteritems():
rack_zone, rack_id = self.__ParseRackTopology(rack)
assert(rack_zone != None)
assert(rack_id != None)
assert(rack_zone == worker_zone)
if rack_zone == worker_zone and len(rack_nodes) < 6:
available_rack = rack
break
# if not, we need to create a new rack
if available_rack == None:
max_rack_id_in_workers_zone = 0
for rack, rack_nodes in rack_to_nodes.iteritems():
rack_zone, rack_id = self.__ParseRackTopology(rack)
if rack_zone == worker_zone:
max_rack_id_in_workers_zone = max(max_rack_id_in_workers_zone,
rack_id)
new_rack_id = max_rack_id_in_workers_zone + 1
new_rack_topology = '/data/%s/rack%03d' % (worker_zone, new_rack_id)
new_racks.append(new_rack_topology)
available_rack = new_rack_topology
assert(available_rack)
server_id = ip_to_id[worker_ip]
      self.__ChangeNodeTopology(server_id, available_rack)
# update current state of the cluster topology
rack_to_nodes, _ = self.__GetWorkNodeTopology()
# get final state of the cluster topology
rack_to_nodes, workers_outside_racks = self.__GetWorkNodeTopology()
#print rack_to_nodes
#print workers_outside_racks
# verify all workers have been placed in a rack
assert(not workers_outside_racks)
# create deluge cache volumes for any newly create racks
# this seems to fail...
    # try waiting to see if there may be a race condition causing a segfault
    # in maprfs
time.sleep(1)
for new_rack in new_racks:
print new_rack
self.__CreateCacheVolume(new_rack)
return
def __ConfigureWorkers(self, worker_instances):
if not worker_instances:
return
hostnames = self.__InstancesToHostnames(worker_instances)
# Verify all workers are reachable before continuing...
core.WaitForHostsReachable(hostnames, self.ssh_key)
master_instance = self.__GetMasterInstance()
master_pub_key = core.ReadRemoteFile('/root/.ssh/id_rsa.pub',
master_instance.public_hostname,
self.ssh_key)
# ensure newline is removed... otherwise there is an error parsing yml!
master_pub_key = master_pub_key.strip()
extra_vars = {'cluster_name': self.config.cluster_name,
'master_ip': master_instance.private_ip,
'master_pub_key': master_pub_key}
path = 'playbooks/worker.yml'
playbook = pkg_resources.resource_filename(__name__, path)
assert(core.RunPlaybookOnHosts(playbook, hostnames, self.ssh_key, extra_vars))
num_cores = self.GetNumCoresPerWorker()
num_map_slots = num_cores
num_reduce_slots = num_cores - 1
assert(self.__ConfigureMapredSite(worker_instances, num_map_slots,
num_reduce_slots))
assert(self.__RestartTaskTrackers(worker_instances))
self.__SetWorkerTopology()
return
def __MaprApi(self, command, params):
#logging.info( '__MaprApi')
master_instance = self.__GetMasterInstance()
command_list = command.split(' ')
command_path = '/'.join(command_list)
mapr_rest_api_url = 'https://%s:8443/rest/%s' % (master_instance.private_ip,
command_path)
#print mapr_rest_api_url
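    # e.g. the command 'node list' posts to https://<master ip>:8443/rest/node/list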
r = None
    while r is None:
try:
# the verify requires requests versions >= 2.8
r = requests.post(mapr_rest_api_url, params=params,
auth=('root', 'mapr'), verify=False)
      except Exception as e:
        logging.info('MapR API request failed, retrying: %s', e)
time.sleep(1)
if r.status_code == 404:
raise RuntimeError('\ninvalid api call: %s.\nReview docs here for help: '\
'http://mapr.com/doc/display/MapR/API+Reference'\
% (r.url))
return r.json()
def __RunMaprCli(self, cmd_str):
cmd = 'sudo maprcli %s' % (cmd_str)
results = self.__RunCommandOnInstances(cmd, [self.__GetMasterInstance()])
exit_code, output = results[0]
return exit_code, output
def __RestartTaskTrackers(self, instances):
node_list = ','.join([i.private_ip for i in instances])
print node_list
params = {'nodes': node_list, 'tasktracker' : 'restart'}
r = self.__MaprApi('node/services', params)
success = True
if r['status'] != 'OK':
logging.info( r)
success = False
return success
def __RestartJobTracker(self):
master_instance = self.__GetMasterInstance()
params = {'nodes': [master_instance.private_hostname],
'jobtracker' : 'restart'}
r = self.__MaprApi('node/services', params)
success = True
if r['status'] != 'OK':
logging.info( r)
success = False
return success
def __InstancesToHostnames(self, instances):
hostnames = [instance.private_ip for instance in instances]
return hostnames
def __WaitForInstancesReachable(self, instances):
assert(instances)
core.WaitForHostsReachable(self.__InstancesToHostnames(instances),
self.ssh_key)
return
def __AreInstancesReachable(self, instances):
return core.AreHostsReachable(self.__InstancesToHostnames(instances),
self.ssh_key)
def __RunCommandOnInstances(self, cmd, instances):
return core.RunCommandOnHosts(cmd,
self.__InstancesToHostnames(instances),
self.ssh_key)
def __LaunchOnDemandMasterInstances(self):
# Compute a good bid price based on recent price history
prefered_master_zone = self._GetAvailabilityZoneNameByIndex(0)
role = 'master'
ami = core.LookupCirrusAmi(self.ec2,
self.config.cluster_instance_type,
self.config.ubuntu_release_name,
self.config.mapr_version,
role,
self.config.ami_release_name,
self.config.mapr_ami_owner_id)
assert(ami)
number_zone_list = [(1, prefered_master_zone)]
new_instances = self.cluster.launch_and_wait_for_demand_instances(
role='master',
image_id=ami.id,
instance_type=self.config.cluster_instance_type,
private_key_name=self.cluster_keypair_name,
number_zone_list=number_zone_list)
assert(new_instances)
return new_instances
def __LaunchSpotMasterInstances(self):
# Compute a good bid price based on recent price history
prefered_master_zone = self._GetAvailabilityZoneNameByIndex(0)
num_days = 1
cur_price = self.cluster.get_current_spot_instance_price(
self.config.cluster_instance_type, prefered_master_zone)
recent_max_price = self.cluster.get_recent_max_spot_instance_price(
self.config.cluster_instance_type, prefered_master_zone, num_days)
high_availability_bid_price = recent_max_price + 0.10
print 'current price: %f' % (cur_price)
print 'high_availability_bid_price: %f' % (high_availability_bid_price)
assert(cur_price < 0.35) # sanity check so we don't do something stupid
assert(high_availability_bid_price < 1.25) # sanity check
# get the ami preconfigured as a master mapr node
role = 'master'
ami = core.LookupCirrusAmi(self.ec2,
self.config.cluster_instance_type,
self.config.ubuntu_release_name,
self.config.mapr_version,
role,
self.config.ami_release_name,
self.config.mapr_ami_owner_id)
if not ami:
raise RuntimeError('failed to find suitable ami: %s' % self.config)
number_zone_list = [(1, prefered_master_zone)]
ids = self.cluster.launch_and_wait_for_spot_instances(
price=high_availability_bid_price,
role='master',
image_id=ami.id,
instance_type=self.config.cluster_instance_type,
private_key_name=self.cluster_keypair_name,
number_zone_list=number_zone_list)
return ids
def __LaunchOnDemandWorkerInstances(self, number_zone_list):
role = 'worker'
ami = core.LookupCirrusAmi(self.ec2,
self.config.cluster_instance_type,
self.config.ubuntu_release_name,
self.config.mapr_version,
role,
self.config.ami_release_name,
self.config.mapr_ami_owner_id)
assert(ami)
new_instances = self.cluster.launch_and_wait_for_demand_instances(
role='spotworker',
image_id=ami.id,
instance_type=self.config.cluster_instance_type,
private_key_name=self.cluster_keypair_name,
number_zone_list=number_zone_list)
assert(new_instances)
return new_instances
def __LaunchSpotWorkerInstances(self, number_zone_list):
max_zone_price = 0
for i, zone in enumerate(self.config.zones):
cur_zone_price = self.cluster.get_current_spot_instance_price(
self.config.cluster_instance_type,
self._GetAvailabilityZoneNameByIndex(i))
logging.info( '%s %f' % (zone, cur_zone_price))
max_zone_price = max(max_zone_price, cur_zone_price)
bid_price = max_zone_price + 0.10
assert(bid_price < 0.5) # safety check!
print 'bid_price: %f' % (bid_price)
role = 'worker'
ami = core.LookupCirrusAmi(self.ec2,
self.config.cluster_instance_type,
self.config.ubuntu_release_name,
self.config.mapr_version,
role,
self.config.ami_release_name,
self.config.mapr_ami_owner_id)
assert(ami)
new_instances = self.cluster.launch_and_wait_for_spot_instances(
price=bid_price,
role='spotworker',
image_id=ami.id,
instance_type=self.config.cluster_instance_type,
private_key_name=self.cluster_keypair_name,
number_zone_list=number_zone_list)
return new_instances
def __GetMasterInstance(self):
master_instance = None
instances = self.cluster.get_instances_in_role("master", "running")
if instances:
assert(len(instances) == 1)
master_instance = instances[0]
return master_instance
def __GetAllInstances(self):
instances = self.cluster.get_instances()
return instances
def __GetWorkerInstances(self):
instances = self.cluster.get_instances_in_role("spotworker", "running")
return instances
def __GetCldbRegisteredWorkerInstances(self):
all_instances = self.__GetWorkerInstances()
cldb_active_ips = set(self.__GetIpsFromCldb())
instances = [i for i in all_instances if i.private_ip in cldb_active_ips]
return instances
def __IpsToServerIds(self):
""" Get list of mapping of ip address into a server id"""
master_instance = self.__GetMasterInstance()
assert(master_instance)
retval, response = self.__RunMaprCli('node list -columns id')
ip_to_id = {}
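    # Each data row is expected to hold three whitespace-separated columns with
    # the server id first and the ip last; the header row (first column 'id')
    # is skipped.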
for line_num, line in enumerate(response.split('\n')):
tokens = line.split()
if len(tokens) == 3 and tokens[0] != 'id':
instance_id = tokens[0]
ip = tokens[2]
ip_to_id[ip] = instance_id
return ip_to_id
def __FixCachePermissions(self):
# Note: tried to move this to the image generation stage, but these
# paths don't exist because ephemeral devices are created after boot
instances = self.__GetWorkerInstances()
mapr_cache_path = '/opt/mapr/logs/cache'
cmd = "sudo mkdir -p %s; sudo chmod -R 755 %s; sudo mkdir %s/hadoop/; "\
"sudo mkdir %s/tmp; sudo chmod -R 1777 %s/tmp" % \
(mapr_cache_path, mapr_cache_path, mapr_cache_path, mapr_cache_path,
mapr_cache_path)
self.__RunCommandOnInstances(cmd, instances)
return
def __CleanUpCoresAlarm(self):
""" Disable core alarm.
    Mapr signals an alarm in the web ui when there are files in a node's
    /opt/cores dir.  This is annoying because we can't see if there is a real
    problem with the node because of this pointless error message.
This removes those files so the alarm goes away.
"""
instances = self.__GetAllInstances()
clean_cores_cmd = """sudo sh -c 'rm /opt/cores/*'"""
self.__RunCommandOnInstances(clean_cores_cmd, instances)
return
def __CleanUpRootPartition(self):
"""
Mapr signals an alarm in the web ui when there are files in a nodes
/opt/cores dir. This is anoying because we can't see if there is a real
problem with the node because of this pointless error message.
This removes those files so the alarm goes away.
"""
instances = self.__GetAllInstances()
clean_cores_cmd = """sudo sh -c 'rm -rf /opt/mapr/cache/tmp/*'"""
self.__RunCommandOnInstances(clean_cores_cmd, instances)
return
| cirruscluster/cirruscluster | cirruscluster/cluster/mapr.py | Python | mit | 42,610 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from celery.schedules import crontab
from indico.core.db import db
from indico.util.date_time import now_utc
from indico.core.celery import celery
from indico.modules.events import Event
from indico.modules.events.reminders import logger
from indico.modules.events.reminders.models.reminders import EventReminder
@celery.periodic_task(name='event_reminders', run_every=crontab(minute='*/5'))
def send_event_reminders():
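    """Sends all reminders that are due and not yet sent for non-deleted events."""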
reminders = EventReminder.find_all(~EventReminder.is_sent, ~Event.is_deleted,
EventReminder.scheduled_dt <= now_utc(),
_join=EventReminder.event_new)
try:
for reminder in reminders:
logger.info('Sending event reminder: %s', reminder)
reminder.send()
finally:
# If we fail at any point during the loop, we'll still commit
# the is_sent change for already-sent reminders instead of
# sending them over and over and thus spamming people.
db.session.commit()
| belokop/indico_bare | indico/modules/events/reminders/tasks.py | Python | gpl-3.0 | 1,801 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-17 01:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Client',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('address', models.CharField(max_length=100)),
],
),
]
| fernandolobato/balarco | clients/migrations/0001_initial.py | Python | mit | 614 |
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for jsonchecker.py."""
import webkitpy.thirdparty.unittest2 as unittest
import jsonchecker
class MockErrorHandler(object):
def __init__(self, handle_style_error):
self.turned_off_filtering = False
self._handle_style_error = handle_style_error
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
return True
class JSONCheckerTest(unittest.TestCase):
"""Tests JSONChecker class."""
def test_line_number_from_json_exception(self):
tests = (
(0, 'No JSON object could be decoded'),
(2, 'Expecting property name: line 2 column 1 (char 2)'),
(3, 'Expecting object: line 3 column 1 (char 15)'),
(9, 'Expecting property name: line 9 column 21 (char 478)'),
)
for expected_line, message in tests:
self.assertEqual(expected_line, jsonchecker.JSONChecker.line_number_from_json_exception(ValueError(message)))
def assert_no_error(self, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
error_handler = MockErrorHandler(handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.turned_off_filtering)
def assert_error(self, expected_line_number, expected_category, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
mock_error_handler.had_error = True
self.assertEqual(expected_line_number, line_number)
self.assertEqual(expected_category, category)
self.assertIn(category, jsonchecker.JSONChecker.categories)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.had_error)
self.assertTrue(error_handler.turned_off_filtering)
def mock_handle_style_error(self):
pass
def test_conflict_marker(self):
self.assert_error(0, 'json/syntax', '<<<<<<< HEAD\n{\n}\n')
def test_single_quote(self):
self.assert_error(2, 'json/syntax', "{\n'slaves': []\n}\n")
def test_init(self):
error_handler = MockErrorHandler(self.mock_handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
self.assertEqual(checker._handle_style_error, error_handler)
def test_no_error(self):
self.assert_no_error("""{
"slaves": [ { "name": "test-slave", "platform": "*" },
{ "name": "apple-xserve-4", "platform": "mac-snowleopard" }
],
"builders": [ { "name": "SnowLeopard Intel Release (Build)", "type": "Build", "builddir": "snowleopard-intel-release",
"platform": "mac-snowleopard", "configuration": "release", "architectures": ["x86_64"],
"slavenames": ["apple-xserve-4"]
}
],
"schedulers": [ { "type": "PlatformSpecificScheduler", "platform": "mac-snowleopard", "branch": "trunk", "treeStableTimer": 45.0,
"builderNames": ["SnowLeopard Intel Release (Build)", "SnowLeopard Intel Debug (Build)"]
}
]
}
""")
| indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py | Python | apache-2.0 | 5,024 |
"""Universal feed parser
Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
Visit https://code.google.com/p/feedparser/ for the latest version
Visit http://packages.python.org/feedparser/ for the latest documentation
Required: Python 2.4 or later
Recommended: iconv_codec <http://cjkpython.i18n.org/>
"""
__version__ = "5.1.3"
__license__ = """
Copyright (c) 2010-2012 Kurt McKee <[email protected]>
Copyright (c) 2002-2008 Mark Pilgrim
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>",
"Ade Oshineye <http://blog.oshineye.com/>",
"Martin Pool <http://sourcefrog.net/>",
"Kurt McKee <http://kurtmckee.org/>",
"Bernd Schlapsi <https://github.com/brot>",]
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
#USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
#ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
ACCEPT_HEADER = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
# If you want feedparser to automatically parse microformat content embedded
# in entry contents, set this to 1
PARSE_MICROFORMATS = 1
# ---------- Python 3 modules (make it work if possible) ----------
try:
import rfc822
except ImportError:
from email import _parseaddr as rfc822
try:
# Python 3.1 introduces bytes.maketrans and simultaneously
# deprecates string.maketrans; use bytes.maketrans if possible
_maketrans = bytes.maketrans
except (NameError, AttributeError):
import string
_maketrans = string.maketrans
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
try:
if bytes is str:
# In Python 2.5 and below, bytes doesn't exist (NameError)
# In Python 2.6 and above, bytes and str are the same type
raise NameError
except NameError:
# Python 2
def _s2bytes(s):
return s
def _l2bytes(l):
return ''.join(map(chr, l))
else:
# Python 3
def _s2bytes(s):
return bytes(s, 'utf8')
def _l2bytes(l):
return bytes(l)
# If you want feedparser to allow all URL schemes, set this to ()
# List culled from Python's urlparse documentation at:
# http://docs.python.org/library/urlparse.html
# as well as from "URI scheme" at Wikipedia:
# https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
# Many more will likely need to be added!
ACCEPTABLE_URI_SCHEMES = (
'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
'wais',
# Additional common-but-unofficial schemes
'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
)
#ACCEPTABLE_URI_SCHEMES = ()
# ---------- required modules (should come with any Python distribution) ----------
import cgi
import codecs
import copy
import datetime
import re
import struct
import time
import types
import urllib
import urllib2
import urlparse
import warnings
from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
try:
from io import BytesIO as _StringIO
except ImportError:
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled your own
try:
import gzip
except ImportError:
gzip = None
try:
import zlib
except ImportError:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
from xml.sax.saxutils import escape as _xmlescape
except ImportError:
_XML_AVAILABLE = 0
def _xmlescape(data,entities={}):
data = data.replace('&', '&')
data = data.replace('>', '>')
data = data.replace('<', '<')
for char, entity in entities:
data = data.replace(char, entity)
return data
else:
try:
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
except xml.sax.SAXReaderNotAvailable:
_XML_AVAILABLE = 0
else:
_XML_AVAILABLE = 1
# sgmllib is not available by default in Python 3; if the end user doesn't have
# it available then we'll lose illformed XML parsing, content santizing, and
# microformat support (at least while feedparser depends on BeautifulSoup).
try:
import sgmllib
except ImportError:
# This is probably Python 3, which doesn't include sgmllib anymore
_SGML_AVAILABLE = 0
# Mock sgmllib enough to allow subclassing later on
class sgmllib(object):
class SGMLParser(object):
def goahead(self, i):
pass
def parse_starttag(self, i):
pass
else:
_SGML_AVAILABLE = 1
# sgmllib defines a number of module-level regular expressions that are
# insufficient for the XML parsing feedparser needs. Rather than modify
# the variables directly in sgmllib, they're defined here using the same
# names, and the compiled code objects of several sgmllib.SGMLParser
# methods are copied into _BaseHTMLProcessor so that they execute in
# feedparser's scope instead of sgmllib's scope.
charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
)
# Unfortunately, these must be copied over to prevent NameError exceptions
entityref = sgmllib.entityref
incomplete = sgmllib.incomplete
interesting = sgmllib.interesting
shorttag = sgmllib.shorttag
shorttagopen = sgmllib.shorttagopen
starttagopen = sgmllib.starttagopen
class _EndBracketRegEx:
def __init__(self):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self, target, index=0):
match = self.endbracket.match(target, index)
if match is not None:
# Returning a new object in the calling thread's context
                # resolves a thread-safety issue.
return EndBracketMatch(match)
return None
class EndBracketMatch:
def __init__(self, match):
self.match = match
def start(self, n):
return self.match.end(n)
endbracket = _EndBracketRegEx()
# iconv_codec provides support for more character encodings.
# It's available from http://cjkpython.i18n.org/
try:
import iconv_codec
except ImportError:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
except ImportError:
chardet = None
# BeautifulSoup is used to extract microformat content from HTML
# feedparser is tested using BeautifulSoup 3.2.0
# http://www.crummy.com/software/BeautifulSoup/
try:
import BeautifulSoup
except ImportError:
BeautifulSoup = None
PARSE_MICROFORMATS = False
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
SUPPORTED_VERSIONS = {'': u'unknown',
'rss090': u'RSS 0.90',
'rss091n': u'RSS 0.91 (Netscape)',
'rss091u': u'RSS 0.91 (Userland)',
'rss092': u'RSS 0.92',
'rss093': u'RSS 0.93',
'rss094': u'RSS 0.94',
'rss20': u'RSS 2.0',
'rss10': u'RSS 1.0',
'rss': u'RSS (unknown version)',
'atom01': u'Atom 0.1',
'atom02': u'Atom 0.2',
'atom03': u'Atom 0.3',
'atom10': u'Atom 1.0',
'atom': u'Atom (unknown version)',
'cdf': u'CDF',
}
class FeedParserDict(dict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['summary', 'subtitle'],
'description_detail': ['summary_detail', 'subtitle_detail'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
try:
return dict.__getitem__(self, 'tags')[0]['term']
except IndexError:
raise KeyError, "object doesn't have key 'category'"
elif key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
elif key == 'license':
for link in dict.__getitem__(self, 'links'):
if link['rel']==u'license' and 'href' in link:
return link['href']
elif key == 'updated':
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
if not dict.__contains__(self, 'updated') and \
dict.__contains__(self, 'published'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated` to `published` if `updated` doesn't "
"exist. This fallback will be removed in a future version "
"of feedparser.", DeprecationWarning)
return dict.__getitem__(self, 'published')
return dict.__getitem__(self, 'updated')
elif key == 'updated_parsed':
if not dict.__contains__(self, 'updated_parsed') and \
dict.__contains__(self, 'published_parsed'):
warnings.warn("To avoid breaking existing software while "
"fixing issue 310, a temporary mapping has been created "
"from `updated_parsed` to `published_parsed` if "
"`updated_parsed` doesn't exist. This fallback will be "
"removed in a future version of feedparser.",
DeprecationWarning)
return dict.__getitem__(self, 'published_parsed')
return dict.__getitem__(self, 'updated_parsed')
else:
realkey = self.keymap.get(key, key)
if isinstance(realkey, list):
for k in realkey:
if dict.__contains__(self, k):
return dict.__getitem__(self, k)
elif dict.__contains__(self, realkey):
return dict.__getitem__(self, realkey)
return dict.__getitem__(self, key)
def __contains__(self, key):
if key in ('updated', 'updated_parsed'):
# Temporarily help developers out by keeping the old
# broken behavior that was reported in issue 310.
# This fix was proposed in issue 328.
return dict.__contains__(self, key)
try:
self.__getitem__(key)
except KeyError:
return False
else:
return True
has_key = __contains__
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __setitem__(self, key, value):
key = self.keymap.get(key, key)
if isinstance(key, list):
key = key[0]
return dict.__setitem__(self, key, value)
def setdefault(self, key, value):
if key not in self:
self[key] = value
return value
return self[key]
def __getattr__(self, key):
# __getattribute__() is called first; this will be called
# only if an attribute was not already found
try:
return self.__getitem__(key)
except KeyError:
raise AttributeError, "object has no attribute '%s'" % key
def __hash__(self):
return id(self)
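# Illustrative sketch (not executed; uses only the class defined above):
# FeedParserDict aliases legacy key names to their normalized equivalents via
# `keymap`, and attribute access falls back to item access through __getattr__.
#
#   d = FeedParserDict()
#   d['description'] = u'hello'   # __setitem__ maps 'description' -> 'summary'
#   d['description']              # -> u'hello' (read back through the alias)
#   d.summary                     # -> u'hello' (__getattr__ falls back to __getitem__)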
_cp1252 = {
128: unichr(8364), # euro sign
130: unichr(8218), # single low-9 quotation mark
131: unichr( 402), # latin small letter f with hook
132: unichr(8222), # double low-9 quotation mark
133: unichr(8230), # horizontal ellipsis
134: unichr(8224), # dagger
135: unichr(8225), # double dagger
136: unichr( 710), # modifier letter circumflex accent
137: unichr(8240), # per mille sign
138: unichr( 352), # latin capital letter s with caron
139: unichr(8249), # single left-pointing angle quotation mark
140: unichr( 338), # latin capital ligature oe
142: unichr( 381), # latin capital letter z with caron
145: unichr(8216), # left single quotation mark
146: unichr(8217), # right single quotation mark
147: unichr(8220), # left double quotation mark
148: unichr(8221), # right double quotation mark
149: unichr(8226), # bullet
150: unichr(8211), # en dash
151: unichr(8212), # em dash
152: unichr( 732), # small tilde
153: unichr(8482), # trade mark sign
154: unichr( 353), # latin small letter s with caron
155: unichr(8250), # single right-pointing angle quotation mark
156: unichr( 339), # latin small ligature oe
158: unichr( 382), # latin small letter z with caron
159: unichr( 376), # latin capital letter y with diaeresis
}
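# Illustrative sketch (not executed): _cp1252 maps the Windows-1252 code points
# 0x80-0x9F, which are C1 control characters in ISO-8859-1/Unicode, to the
# characters publishers almost always intended. It is applied to unicode output
# further below with unicode.translate(), e.g.:
#
#   u'smart \x93quotes\x94'.translate(_cp1252)   # -> u'smart \u201cquotes\u201d'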
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
#try:
if not isinstance(uri, unicode):
uri = uri.decode('utf-8', 'ignore')
uri = urlparse.urljoin(base, uri)
if not isinstance(uri, unicode):
return uri.decode('utf-8', 'ignore')
return uri
#except:
# uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
# return urlparse.urljoin(base, uri)
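# Illustrative sketch (not executed): _urljoin() behaves like urlparse.urljoin()
# but first strips extra slashes immediately after the scheme's '://' and
# normalizes the result to unicode, e.g.:
#
#   _urljoin(u'http://example.org/feed/', u'entry/1')
#   # -> u'http://example.org/feed/entry/1'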
class _FeedParserMixin:
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = [u'text/html', u'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or u''
self.lang = baselang or None
self.svgOK = 0
self.title_depth = -1
self.depth = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
def _normalize_attributes(self, kv):
k = kv[0].lower()
v = k in ('rel', 'type') and kv[1].lower() or kv[1]
# the sgml parser doesn't handle entities in attributes, nor
# does it pass the attribute values through as unicode, while
# strict xml parsers do -- account for this difference
if isinstance(self, _LooseFeedParser):
            v = v.replace('&amp;', '&')
if not isinstance(v, unicode):
v = v.decode('utf-8')
return (k, v)
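    # Illustrative sketch (not executed): _normalize_attributes() lower-cases
    # attribute names, lower-cases the values of 'rel' and 'type', and decodes
    # bytestring values to unicode, e.g.:
    #
    #   self._normalize_attributes(('REL', 'Alternate'))   # -> ('rel', u'alternate')
    #   self._normalize_attributes(('Href', '/x'))         # -> ('href', u'/x')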
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = map(self._normalize_attributes, attrs)
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if not isinstance(baseuri, unicode):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
        # uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
                # No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = u'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == u'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = u'text/plain'
elif contentType == 'html':
contentType = u'text/html'
elif contentType == 'xhtml':
contentType = u'application/xhtml+xml'
return contentType
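    # Illustrative sketch (not executed): mapContentType() expands the short type
    # names allowed by Atom into full MIME types, e.g.:
    #
    #   self.mapContentType('html')        # -> u'text/html'
    #   self.mapContentType('XHTML')       # -> u'application/xhtml+xml'
    #   self.mapContentType('text/plain')  # unchanged (only lower-cased)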
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = u'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = u'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = u'atom10'
if loweruri.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = u'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or u'', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if not isinstance(v, unicode):
pieces[i] = v.decode('utf-8')
output = u''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = u'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
if self.encoding and not isinstance(output, unicode):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if isinstance(output, unicode):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',s)):
return
# all entities must have been defined as valid HTML entities
if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
return
return 1
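    # Illustrative sketch (not executed): lookslikehtml() only claims a string is
    # HTML when it contains a close tag or an entity reference AND every tag and
    # entity is on the sanitizer's whitelist, e.g.:
    #
    #   _FeedParserMixin.lookslikehtml(u'<p>hi</p>')           # -> 1
    #   _FeedParserMixin.lookslikehtml(u'just plain text')     # -> None (no close tag or entity)
    #   _FeedParserMixin.lookslikehtml(u'<script>x</script>')  # -> None ('script' is not acceptable)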
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith(u'text/'):
return 0
if self.contentparams['type'].endswith(u'+xml'):
return 0
if self.contentparams['type'].endswith(u'/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': u'rss091u',
'0.92': u'rss092',
'0.93': u'rss093',
'0.94': u'rss094'}
#If we're here then this is an RSS feed.
#If we don't have a version or have a version that starts with something
#other than RSS then there's been a mistake. Correct it.
if not self.version or not self.version.startswith(u'rss'):
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = u'rss20'
else:
self.version = u'rss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
def _cdf_common(self, attrsD):
if 'lastmod' in attrsD:
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if 'href' in attrsD:
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': u'atom01',
'0.2': u'atom02',
'0.3': u'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = u'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
if not self.inentry:
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.title_depth = -1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.title_depth = -1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
# Append a new FeedParserDict when expecting an author
context = self._getContext()
context.setdefault('authors', [])
context['authors'].append(FeedParserDict())
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except ValueError:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = u'%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, u'')
author = author.replace(u'()', u'')
author = author.replace(u'<>', u'')
            author = author.replace(u'&lt;&gt;', u'')
author = author.strip()
if author and (author[0] == u'('):
author = author[1:]
if author and (author[-1] == u')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
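    # Illustrative sketch (not executed; the address is a made-up example): when a
    # feed supplies a combined author string such as
    #   context['author'] = u'John Doe (john@example.com)'
    # _sync_author_detail() extracts the email with the regex above and fills in
    #   context['author_detail'] = {'name': u'John Doe', 'email': u'john@example.com'}
    # while leaving the original 'author' string untouched.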
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, u'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, u'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
self.title_depth = -1
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
_start_pubdate = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value), overwrite=True)
_end_dcterms_issued = _end_published
_end_issued = _end_published
_end_pubdate = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_dc_date = _start_updated
_start_lastbuilddate = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value, overwrite=True)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_dc_date = _end_updated
_end_lastbuilddate = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value), overwrite=True)
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel'] = u'license'
if value:
attrsD['href'] = value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _start_media_category(self, attrsD):
attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
self._start_category(attrsD)
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split(','):
if term.strip():
self._addTag(term.strip(), u'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value:
return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
_end_media_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', u'alternate')
if attrsD['rel'] == u'self':
attrsD.setdefault('type', u'application/atom+xml')
else:
attrsD.setdefault('type', u'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
if not (self.inentry and self.inimage):
context['links'].append(FeedParserDict(attrsD))
if 'href' in attrsD:
expectingText = 0
if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
def _end_link(self):
value = self.pop('link')
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
_start_id = _start_guid
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and 'link' not in self._getContext())
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
_end_id = _end_guid
def _start_title(self, attrsD):
if self.svgOK:
return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK:
return
value = self.popContent('title')
if not value:
return
self.title_depth = self.depth
_end_dc_title = _end_title
def _end_media_title(self):
title_depth = self.title_depth
self._end_title()
self.title_depth = title_depth
def _start_description(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, u'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if 'href' in attrsD:
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if 'generator_detail' in context:
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if 'summary' in context:
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel'] = u'enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
def _start_source(self, attrsD):
if 'url' in attrsD:
# This means that we're processing a source element from an RSS 2.0 feed
self.sourcedata['href'] = attrsD[u'url']
self.push('source', 1)
self.insource = 1
self.title_depth = -1
def _end_source(self):
self.insource = 0
value = self.pop('source')
if value:
self.sourcedata['title'] = value
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, u'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, u'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
value = self.popContent('content')
if copyToSummary:
self._save('summary', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
if attrsD.get('href'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
elif attrsD.get('url'):
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
# Convert 'yes' -> True, 'clean' to False, and any other value to None
# False and None both evaluate as False, so the difference can be ignored
# by applications that only need to know if the content is explicit.
self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
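    # Illustrative sketch (not executed): the tuple-indexing expression above maps
    # the itunes:explicit value onto (None, False, True):
    #
    #   value == 'yes'    -> index 2 -> True
    #   value == 'clean'  -> index 1 -> False
    #   anything else     -> index 0 -> None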
def _start_media_content(self, attrsD):
context = self._getContext()
context.setdefault('media_content', [])
context['media_content'].append(attrsD)
def _start_media_thumbnail(self, attrsD):
context = self._getContext()
context.setdefault('media_thumbnail', [])
self.push('url', 1) # new
context['media_thumbnail'].append(attrsD)
def _end_media_thumbnail(self):
url = self.pop('url')
context = self._getContext()
if url != None and len(url.strip()) != 0:
if 'url' not in context['media_thumbnail'][-1]:
context['media_thumbnail'][-1]['url'] = url
def _start_media_player(self, attrsD):
self.push('media_player', 0)
self._getContext()['media_player'] = FeedParserDict(attrsD)
def _end_media_player(self):
value = self.pop('media_player')
context = self._getContext()
context['media_player']['content'] = value
def _start_newlocation(self, attrsD):
self.push('newlocation', 1)
def _end_newlocation(self):
url = self.pop('newlocation')
context = self._getContext()
# don't set newlocation if the context isn't right
if context is not self.feeddata:
return
context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
self.decls = {}
def startPrefixMapping(self, prefix, uri):
if not uri:
return
# Jython uses '' instead of None; standardize on None
prefix = prefix or None
self.trackNamespace(prefix, uri)
if prefix and uri == 'http://www.w3.org/1999/xlink':
self.decls['xmlns:' + prefix] = uri
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find(u'backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = u'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD, self.decls = self.decls, {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
for (namespace, attrlocalname), attrvalue in attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
# drv_libxml2 calls warning() in some cases
warning = error
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
'source', 'track', 'wbr'
])
def __init__(self, encoding, _type):
self.encoding = encoding
self._type = _type
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
# By declaring these methods and overriding their compiled code
# with the code from sgmllib, the original code will execute in
# feedparser's scope instead of sgmllib's. This means that the
# `tagfind` and `charref` regular expressions will be found as
# they're declared above, not as they're declared in sgmllib.
def goahead(self, i):
pass
goahead.func_code = sgmllib.SGMLParser.goahead.func_code
def __parse_starttag(self, i):
pass
__parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
def parse_starttag(self,i):
j = self.__parse_starttag(i)
if self._type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
        data = data.replace('&#39;', "'")
        data = data.replace('&#34;', '"')
try:
bytes
if bytes is str:
raise NameError
self.encoding = self.encoding + u'_INVALID_PYTHON_3'
except NameError:
if self.encoding and isinstance(data, unicode):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs:
return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
                value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
                value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if not isinstance(value, unicode):
value = value.decode(self.encoding, 'ignore')
try:
# Currently, in Python 3 the key is already a str, and cannot be decoded again
uattrs.append((unicode(key, self.encoding), value))
except TypeError:
uattrs.append((key, value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs = strattrs.encode(self.encoding)
except (UnicodeEncodeError, LookupError):
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%s%s />' % (tag, strattrs))
else:
self.pieces.append('<%s%s>' % (tag, strattrs))
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%s>" % tag)
def handle_charref(self, ref):
# called for each character reference, e.g. for ' ', ref will be '160'
# Reconstruct the original character reference.
ref = ref.lower()
if ref.startswith('x'):
value = int(ref[1:], 16)
else:
value = int(ref)
if value in _cp1252:
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%s;' % ref)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '©', ref will be 'copy'
# Reconstruct the original entity reference.
if ref in name2codepoint or ref == 'apos':
self.pieces.append('&%s;' % ref)
else:
            self.pieces.append('&amp;%s' % ref)
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%s-->' % text)
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%s>' % text)
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%s>' % text)
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
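    # Illustrative sketch (not executed): _BaseHTMLProcessor re-serializes whatever
    # it parses, so feeding it markup and calling output() round-trips the document
    # with normalized empty elements, e.g.:
    #
    #   p = _BaseHTMLProcessor('utf-8', 'text/html')
    #   p.feed('<p>hi<br></p>')
    #   p.output()   # -> '<p>hi<br /></p>'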
def parse_declaration(self, i):
try:
return sgmllib.SGMLParser.parse_declaration(self, i)
except sgmllib.SGMLParseError:
# escape the doctype declaration and continue parsing
            self.handle_data('&lt;')
return i+1
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
        data = data.replace('&#60;', '&lt;')
        data = data.replace('&#x3c;', '&lt;')
        data = data.replace('&#x3C;', '&lt;')
        data = data.replace('&#62;', '&gt;')
        data = data.replace('&#x3e;', '&gt;')
        data = data.replace('&#x3E;', '&gt;')
        data = data.replace('&#38;', '&amp;')
        data = data.replace('&#x26;', '&amp;')
        data = data.replace('&#34;', '&quot;')
        data = data.replace('&#x22;', '&quot;')
        data = data.replace('&#39;', '&apos;')
        data = data.replace('&#x27;', '&apos;')
if not self.contentparams.get('type', u'xml').endswith(u'xml'):
            data = data.replace('&lt;', '<')
            data = data.replace('&gt;', '>')
            data = data.replace('&amp;', '&')
            data = data.replace('&quot;', '"')
            data = data.replace('&apos;', "'")
return data
def strattrs(self, attrs):
        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
known_binary_extensions = set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if isinstance(data, unicode):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if isinstance(s, basestring):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
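        # Fold a long vCard content line per RFC 2426: the first physical line
        # may be up to 75 characters, and each continuation line begins with a
        # single space followed by up to 74 more characters.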
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
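        # Find descendants of elmRoot whose class attribute contains sProperty
        # and extract their value according to iPropertyType (STRING, DATE,
        # URI, NODE, or EMAIL). Returns a list when bAllowMultiple is set,
        # otherwise the first match; bAutoEscape applies vCard escaping.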
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple:
return []
elif iPropertyType == self.STRING:
return ''
elif iPropertyType == self.DATE:
return None
elif iPropertyType == self.URI:
return ''
elif iPropertyType == self.NODE:
return None
else:
return None
arValues = []
for elmResult in arResults:
sValue = None
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a':
sValue = elmResult.get('href')
elif sNodeName == 'img':
sValue = elmResult.get('src')
elif sNodeName == 'object':
sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
def findVCards(self, elmRoot, bAgentParsing=0):
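        # Serialize every hCard found under elmRoot into vCard 3.0 text.
        # When bAgentParsing is true, elmRoot itself is treated as a single
        # card (used when recursing into embedded AGENT vcards).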
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or u''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
# Completely remove the agent element from the parse tree
elmAgent.extract()
else:
                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
# XXX - this is super ugly; properly fix this with issue 148
for i, s in enumerate(arLines):
if not isinstance(s, unicode):
arLines[i] = s.decode('utf-8', 'ignore')
sVCards += u'\n'.join(arLines) + u'\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
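        # Heuristic: treat the element as a downloadable enclosure if its type
        # attribute names audio, video, or non-XML application content, or if
        # its href path ends in a known binary file extension.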
attrsD = elm.attrMap
if 'href' not in attrsD:
return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
try:
path = urlparse.urlparse(attrsD['href'])[2]
except ValueError:
return 0
if path.find('.') == -1:
return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
def findTags(self):
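        # Collect rel="tag" links: the last non-empty path segment becomes the
        # tag term, and the rest of the URL becomes the tag scheme.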
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href:
continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
if segments:
tag = segments.pop()
else:
# there are no tags
continue
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', u'').split()
xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
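    # Run the microformats parser over raw HTML and return a dictionary with
    # 'tags', 'enclosures', 'xfn', and 'vcard' keys, or None when
    # BeautifulSoup is unavailable or the source cannot be parsed.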
if not BeautifulSoup:
return
try:
p = _MicroformatsParser(htmlSource, baseURI, encoding)
except UnicodeEncodeError:
# sgmllib throws this exception when performing lookups of tags
# with non-ASCII characters in them.
return
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
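    # Rewrites the URI-bearing attributes listed in relative_uris so that
    # relative references are resolved against the document's base URI.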
relative_uris = set([('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src'),
('video', 'poster')])
def __init__(self, baseuri, encoding, _type):
_BaseHTMLProcessor.__init__(self, encoding, _type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _RelativeURIResolver(baseURI, encoding, _type)
p.feed(htmlSource)
return p.output()
def _makeSafeAbsoluteURI(base, rel=None):
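    # Resolve rel against base and return the absolute URI only if its scheme
    # appears in ACCEPTABLE_URI_SCHEMES; otherwise return an empty string.
    # If the scheme whitelist is empty, any resolvable URI is returned as-is.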
# bail if ACCEPTABLE_URI_SCHEMES is empty
if not ACCEPTABLE_URI_SCHEMES:
try:
return _urljoin(base, rel or u'')
except ValueError:
return u''
if not base:
return rel or u''
if not rel:
try:
scheme = urlparse.urlparse(base)[0]
except ValueError:
return u''
if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
return base
return u''
try:
uri = _urljoin(base, rel)
except ValueError:
return u''
if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
return u''
return uri
class _HTMLSanitizer(_BaseHTMLProcessor):
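    # Whitelist-based sanitizer: only the elements and attributes listed below
    # survive (plus MathML/SVG content inside properly namespaced <math> or
    # <svg>), style attributes are filtered through sanitize_style(), and the
    # contents of <script>, <applet>, and <style> are dropped entirely.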
acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang'])
unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
acceptable_css_properties = set(['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width'])
# survey of common keywords found in feeds
acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow'])
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none', 'semantics'])
mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use'])
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
'min', 'name', 'offset', 'opacity', 'orient', 'origin',
'overline-position', 'overline-thickness', 'panose-1', 'path',
'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
'stop-color', 'stop-opacity', 'strikethrough-position',
'strikethrough-thickness', 'stroke', 'stroke-dasharray',
'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
'y2', 'zoomAndPan'])
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity'])
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# add implicit namespaces to html5 inline svg/mathml
if self._type.endswith('html'):
if not dict(attrs).get('xmlns'):
if tag=='svg':
attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
if tag=='math':
attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK += 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK += 1
# chose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
# make sure the uri uses an acceptable uri scheme
if key == u'href':
value = _makeSafeAbsoluteURI(value)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value:
clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK:
self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg' and self.svgOK:
self.svgOK -= 1
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
return ''
# This replaced a regexp that used re.match and was prone to pathological back-tracking.
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value:
continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
def parse_comment(self, i, report=1):
ret = _BaseHTMLProcessor.parse_comment(self, i, report)
if ret >= 0:
return ret
# if ret == -1, this may be a malicious attempt to circumvent
# sanitization, or a page-destroying unclosed comment
match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
if match:
return match.end()
# unclosed comment; deliberately fail to handle_data()
return len(self.rawdata)
def _sanitizeHTML(htmlSource, encoding, _type):
if not _SGML_AVAILABLE:
return htmlSource
p = _HTMLSanitizer(encoding, _type)
    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = isinstance(data, unicode)
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
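    # urllib2 handler used when fetching feeds: records the HTTP status code
    # on the returned file object, preserves the status across redirects, and
    # retries 401 responses with digest auth when basic auth was attempted.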
def http_error_default(self, req, fp, code, msg, headers):
# The default implementation just raises HTTPError.
# Forget that.
fp.status = code
return fp
def http_error_301(self, req, fp, code, msg, hdrs):
result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
code, msg, hdrs)
result.status = code
result.newurl = result.geturl()
return result
# The default implementations in urllib2.HTTPRedirectHandler
# are identical, so hardcoding a http_error_301 call above
# won't affect anything
http_error_300 = http_error_301
http_error_302 = http_error_301
http_error_303 = http_error_301
http_error_307 = http_error_301
def http_error_401(self, req, fp, code, msg, headers):
# Check if
        # - server requires digest auth, AND
        # - we tried (unsuccessfully) with basic auth.
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
if base64 is None or 'Authorization' not in req.headers \
or 'WWW-Authenticate' not in headers:
return self.http_error_default(req, fp, code, msg, headers)
auth = _base64decode(req.headers['Authorization'].split(' ')[1])
user, passw = auth.split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
if request_headers is supplied it is a dictionary of HTTP request headers
that will override the values generated by FeedParser.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if isinstance(url_file_stream_or_string, basestring) \
and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
# Deal with the feed URI scheme
if url_file_stream_or_string.startswith('feed:http'):
url_file_stream_or_string = url_file_stream_or_string[5:]
elif url_file_stream_or_string.startswith('feed:'):
url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
if not agent:
agent = USER_AGENT
# Test for inline user:password credentials for HTTP basic auth
auth = None
if base64 and not url_file_stream_or_string.startswith('ftp:'):
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.standard_b64encode(user_passwd).strip()
# iri support
if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
# try to open with urllib2 (to use optional headers)
request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string, 'rb')
except (IOError, UnicodeEncodeError, TypeError):
# if url_file_stream_or_string is a unicode object that
# cannot be converted to the encoding returned by
# sys.getfilesystemencoding(), a UnicodeEncodeError
# will be thrown
# If url_file_stream_or_string is a string that contains NULL
# (such as an XML document encoded in UTF-32), TypeError will
# be thrown.
pass
# treat url_file_stream_or_string as string
if isinstance(url_file_stream_or_string, unicode):
return _StringIO(url_file_stream_or_string.encode('utf-8'))
return _StringIO(url_file_stream_or_string)
def _convert_to_idn(url):
"""Convert a URL to IDN notation"""
# this function should only be called with a unicode string
# strategy: if the host cannot be encoded in ascii, then
# it'll be necessary to encode it in idn form
parts = list(urlparse.urlsplit(url))
try:
parts[1].encode('ascii')
except UnicodeEncodeError:
# the url needs to be converted to idn notation
host = parts[1].rsplit(':', 1)
newhost = []
port = u''
if len(host) == 2:
port = host.pop()
for h in host[0].split('.'):
newhost.append(h.encode('idna').decode('utf-8'))
parts[1] = '.'.join(newhost)
if port:
parts[1] += ':' + port
return urlparse.urlunsplit(parts)
else:
return url
def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
request = urllib2.Request(url)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if isinstance(modified, basestring):
modified = _parse_date(modified)
elif isinstance(modified, datetime.datetime):
modified = modified.utctimetuple()
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
# [('Cookie','Something'),('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
return request
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}))?'
+ r'(\.(?P<fracsecond>\d+))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9f9eb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m or m.group(2) not in _hungarian_months:
return None
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
# Modified to also support MSSQL-style datetimes as defined at:
# http://msdn.microsoft.com/en-us/library/ms186724.aspx
# (which basically means allowing a space as a date/time/timezone separator)
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
'|(?P<julian>\d\d\d)))?')
__tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
+ __tzd_re)
__datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString):
return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0:
return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
# Define the strings used by the RFC822 datetime parser
_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# Only the first three letters of the month name matter
_rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
# The year may be 2 or 4 digits; capture the century if it exists
_rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
_rfc822_day = "(?P<day> *\d{1,2})"
_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
_rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
_rfc822_tznames = {
'ut': 0, 'gmt': 0, 'z': 0,
'adt': -3, 'ast': -4, 'at': -4,
'edt': -4, 'est': -5, 'et': -5,
'cdt': -5, 'cst': -6, 'ct': -6,
'mdt': -6, 'mst': -7, 'mt': -7,
'pdt': -7, 'pst': -8, 'pt': -8,
'a': -1, 'n': 1,
'm': -12, 'y': 12,
}
# The timezone may be prefixed by 'Etc/'
_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
_rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
_rfc822_match = re.compile(
"(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
).match
def _parse_date_group_rfc822(m):
# Calculate a date and timestamp
for k in ('year', 'day', 'hour', 'minute', 'second'):
m[k] = int(m[k])
m['month'] = _rfc822_months.index(m['month']) + 1
# If the year is 2 digits, assume everything in the 90's is the 1990's
if m['year'] < 100:
m['year'] += (1900, 2000)[m['year'] < 90]
stamp = datetime.datetime(*[m[i] for i in
('year', 'month', 'day', 'hour', 'minute', 'second')])
# Use the timezone information to calculate the difference between
# the given date and timestamp and Universal Coordinated Time
tzhour = 0
tzmin = 0
if m['tz'] and m['tz'].startswith('gmt'):
# Handle GMT and GMT+hh:mm timezone syntax (the trailing
# timezone info will be handled by the next `if` block)
m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
if not m['tz']:
pass
elif m['tz'].startswith('+'):
tzhour = int(m['tz'][1:3])
tzmin = int(m['tz'][3:])
elif m['tz'].startswith('-'):
tzhour = int(m['tz'][1:3]) * -1
tzmin = int(m['tz'][3:]) * -1
else:
tzhour = _rfc822_tznames[m['tz']]
delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
# Return the date and timestamp in UTC
return (stamp - delta).utctimetuple()
def _parse_date_rfc822(dt):
"""Parse RFC 822 dates and times, with one minor
difference: years may be 4DIGIT or 2DIGIT.
http://tools.ietf.org/html/rfc822#section-5"""
try:
m = _rfc822_match(dt.lower()).groupdict(0)
except AttributeError:
return None
return _parse_date_group_rfc822(m)
registerDateHandler(_parse_date_rfc822)
def _parse_date_rfc822_grubby(dt):
"""Parse date format similar to RFC 822, but
the comma after the dayname is optional and
month/day are inverted"""
_rfc822_date_grubby = "%s %s %s" % (_rfc822_month, _rfc822_day, _rfc822_year)
_rfc822_match_grubby = re.compile(
"(?:%s[,]? )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date_grubby, _rfc822_time)
).match
try:
m = _rfc822_match_grubby(dt.lower()).groupdict(0)
except AttributeError:
return None
return _parse_date_group_rfc822(m)
registerDateHandler(_parse_date_rfc822_grubby)
def _parse_date_asctime(dt):
"""Parse asctime-style dates"""
dayname, month, day, remainder = dt.split(None, 3)
# Convert month and day into zero-padded integers
month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
day = '%02i ' % (int(day),)
dt = month + day + remainder
return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
registerDateHandler(_parse_date_asctime)
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
m = _my_date_pattern.search(aDateString)
if m is None:
return None
dow, year, month, day, hour, minute, second, tz = m.groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
ZERO_BYTES = _l2bytes([0x00, 0x00])
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
def convert_to_utf8(http_headers, data):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = u''
xml_encoding = u''
rfc3023_encoding = u''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = u'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = u'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = u'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = u'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = u'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = u'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = u'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = u'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = u'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
u'u16', u'utf-16', u'utf16', u'utf_16',
u'u32', u'utf-32', u'utf32', u'utf_32',
u'iso-10646-ucs-2', u'iso-10646-ucs-4',
u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if not isinstance(http_encoding, unicode):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = (u'application/xml', u'application/xml-dtd',
u'application/xml-external-parsed-entity')
text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith(u'application/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith(u'text/') and
http_content_type.endswith(u'+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_content_type.startswith(u'text/'):
rfc3023_encoding = http_encoding or u'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or u'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or u'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == u'gb2312':
rfc3023_encoding = u'gb18030'
if xml_encoding.lower() == u'gb2312':
xml_encoding = u'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
chardet_encoding = None
tried_encodings = []
if chardet:
chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore')
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
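    # (chardet's guess, if chardet is installed, plus a few common fallbacks are tried last)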
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + u'\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = u''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
return data, rfc3023_encoding, error
# Match XML entity declarations.
# Example: <!ENTITY copyright "(C)">
RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
# Match XML DOCTYPE declarations.
# Example: <!DOCTYPE feed [ ]>
RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
# Match safe entity declarations.
# This will allow hexadecimal character references through,
# as well as text, but not arbitrary nested entities.
# Example: cubed "³"
# Example: copyright "(C)"
# Forbidden: explode1 "&explode2;&explode2;"
RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
def replace_doctype(data):
'''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document with a replaced DOCTYPE
'''
# Divide the document into two groups by finding the location
# of the first element that doesn't begin with '<?' or '<!'.
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
head, data = data[:start+1], data[start+1:]
# Save and then remove all of the ENTITY declarations.
entity_results = RE_ENTITY_PATTERN.findall(head)
head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
# Find the DOCTYPE declaration and check the feed type.
doctype_results = RE_DOCTYPE_PATTERN.findall(head)
doctype = doctype_results and doctype_results[0] or _s2bytes('')
if _s2bytes('netscape') in doctype.lower():
version = u'rss091n'
else:
version = None
# Re-insert the safe ENTITY declarations if a DOCTYPE was found.
replacement = _s2bytes('')
if len(doctype_results) == 1 and entity_results:
match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
safe_entities = filter(match_safe_entities, entity_results)
if safe_entities:
replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
+ _s2bytes('>\n<!ENTITY ').join(safe_entities) \
+ _s2bytes('>\n]>')
data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
# Precompute the safe entities for the loose parser.
safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
return version, data, safe_entities
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
'''Parse a feed from a URL, file, stream, or string.
request_headers, if given, is a dict from http header name to value to add
to the request; this overrides internally generated values.
'''
if handlers is None:
handlers = []
if request_headers is None:
request_headers = {}
if response_headers is None:
response_headers = {}
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
result['bozo'] = 0
if not isinstance(handlers, list):
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = None
f = None
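    # Collect the HTTP response headers, if the opened resource exposes any.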
if hasattr(f, 'headers'):
result['headers'] = dict(f.headers)
# overwrite existing headers using response_headers
if 'headers' in result:
result['headers'].update(response_headers)
elif response_headers:
result['headers'] = copy.deepcopy(response_headers)
# lowercase all of the HTTP headers for comparisons per RFC 2616
if 'headers' in result:
http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
else:
http_headers = {}
# if feed is gzip-compressed, decompress it
if f and data and http_headers:
if gzip and 'gzip' in http_headers.get('content-encoding', ''):
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except (IOError, struct.error), e:
# IOError can occur if the gzip header is bad.
# struct.error can occur if the data is damaged.
result['bozo'] = 1
result['bozo_exception'] = e
if isinstance(e, struct.error):
# A gzip header was found but the data is corrupt.
# Ideally, we should re-request the feed without the
# 'Accept-encoding: gzip' header, but we don't.
data = None
elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
try:
data = zlib.decompress(data)
except zlib.error, e:
try:
# The data may have no headers and no checksum.
data = zlib.decompress(data, -15)
except zlib.error, e:
result['bozo'] = 1
result['bozo_exception'] = e
# save HTTP headers
if http_headers:
if 'etag' in http_headers:
etag = http_headers.get('etag', u'')
if not isinstance(etag, unicode):
etag = etag.decode('utf-8', 'ignore')
if etag:
result['etag'] = etag
if 'last-modified' in http_headers:
modified = http_headers.get('last-modified', u'')
if modified:
result['modified'] = modified
result['modified_parsed'] = _parse_date(modified)
if hasattr(f, 'url'):
if not isinstance(f.url, unicode):
result['href'] = f.url.decode('utf-8', 'ignore')
else:
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'close'):
f.close()
if data is None:
return result
# Stop processing if the server sent HTTP 304 Not Modified.
if getattr(f, 'code', 0) == 304:
result['version'] = u''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
data, result['encoding'], error = convert_to_utf8(http_headers, data)
use_strict_parser = result['encoding'] and True or False
if error is not None:
result['bozo'] = 1
result['bozo_exception'] = error
result['version'], data, entities = replace_doctype(data)
# Ensure that baseuri is an absolute URI using an acceptable URI scheme.
contentloc = http_headers.get('content-location', u'')
href = result.get('href', u'')
baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
baselang = http_headers.get('content-language', None)
if not isinstance(baselang, unicode) and baselang is not None:
baselang = baselang.decode('utf-8', 'ignore')
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
try:
# disable downloading external doctype references, if possible
saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
except xml.sax.SAXNotSupportedException:
pass
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
try:
saxparser.parse(source)
except xml.sax.SAXException, e:
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
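    # If strict XML parsing failed or was skipped, fall back to the loose SGML-based parser.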
if not use_strict_parser and _SGML_AVAILABLE:
feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
feedparser.feed(data.decode('utf-8', 'replace'))
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
| kasparg/lawcats | libs/feedparser.py | Python | gpl-3.0 | 166,787 |
"""
A local module that is used by a predict function. This file is used to
test the functionality of exporting modules through a package."""
COEFFICIENT = 4
| nishadsingh1/clipper | integration-tests/util_package/mock_module_in_package.py | Python | apache-2.0 | 159 |
from django.conf.urls import url
from team import views
"""
URL Map
[team public views]
/
/<team_name>
/team_id/players [manage player number, active]
/team_id/events [team internal meetings]
/team_id/upload [upload team picture]
/team_id/photos [team photos]
/team_id/finance [team expenses]
/team_name/schedules [team training session schedule]
/team_name/school
[schedule]
1) season
team_id
name
school
start_date
end_date
address
2) session
start_time
end_time
"""
urlpatterns = [
# ================================================
# public views for a team
# ================================================
# display teams default page
url(r'^$', views.teams, name='teams'),
# display teams by team name page
url(r'^(?P<name>[^/]+)$', views.team_detail, name='team_detail'),
# ================================================
# profile views
# ================================================
url(r'^profile/(?P<username>[^/]+)$', views.edit_profile, name='edit_profile'),
url(r'^player/(?P<username>[^/]+)/enroll$', views.player_enroll, name='player_enroll'),
# ================================================
# captain manage players
# player status update
# (f)free - player can join other teams
# (p)pending - player left the league, or new apply to join
# (a)active - player is active
# ================================================
# pending-players, players requested to join
url(r'^(?P<team_name>[^/]+)/players/pending$', views.pending_players, name='pending_players'),
# current-players for editing
url(r'^(?P<team_name>[^/]+)/players/manage$', views.manage_players, name='manage_players'),
# captain manage players
url(r'^player/(?P<player_id>[^/]+)/leave$', views.leave_team, name='leave_team'),
url(r'^player/(?P<player_id>[^/]+)/approve$', views.join_team, name='join_team'),
url(r'^(?P<team_name>[^/]+)/player/(?P<username>[^/]+)/number$', views.player_number, name='player_number'),
# # show captain profile in edit form
# url(r'^captain/(?P<username>[^/]+)/$', views.captain_profile, name='captain_profile'),
#
# # show
# url(r'^enroll/$', views.post_enroll, name='post_enroll'),
# ================================================
# url(r'^profile/$', views.profile, name='profile'),
# url(r'^logout/$', views.user_logout, name='logout'),
]
| vollov/lotad | team/urls.py | Python | mit | 2,476 |
import unittest2
from mock import Mock, patch
import urllib2
from google.appengine.api import taskqueue
from google.appengine.ext import testbed
from models.notifications.requests.request import Request
from models.notifications.requests.webhook_request import WebhookRequest
from tests.mocks.urllib2.mock_http_error import MockHTTPError
from tests.mocks.notifications.mock_notification import MockNotification
class TestWebhookRequest(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_taskqueue_stub(root_path='.')
self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
def tearDown(self):
self.testbed.deactivate()
def test_subclass(self):
request = WebhookRequest(MockNotification(), 'https://www.thebluealliance.com', 'secret')
self.assertTrue(isinstance(request, Request))
def test_str(self):
message_str = WebhookRequest(MockNotification(), 'https://www.thebluealliance.com', 'secret')
self.assertTrue('WebhookRequest(notification=' in str(message_str))
def test_webhook_message(self):
webhook_message_data = {'test': 'something'}
notification = MockNotification(webhook_message_data=webhook_message_data)
message = WebhookRequest(notification, 'https://www.thebluealliance.com', 'secret')
self.assertEqual(message._json_string(), '{"message_data": {"test": "something"}, "message_type": "verification"}')
def test_webhook_message_no_payload(self):
notification = MockNotification()
message = WebhookRequest(notification, 'https://www.thebluealliance.com', 'secret')
self.assertEqual(message._json_string(), '{"message_type": "verification"}')
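    # The hex digests asserted below are the expected checksum/HMAC values for the fixed payloads and secrets used in these tests.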
def test_generate_webhook_checksum(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
message_json = message._json_string()
self.assertEqual(message._generate_webhook_checksum(message_json), 'dbecd85ae53d221c387647085912d2107fa04cd5')
def test_generate_webhook_checksum_hmac(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
message_json = message._json_string()
self.assertEqual(message._generate_webhook_hmac(message_json), 'fb2b61d55884a648b35801688754924778b3e71ea2bf8f5effb0f3ffecb5c940')
def test_generate_webhook_checksum_hmac_unicode_ascii(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', unicode('secret'))
message_json = message._json_string()
self.assertEqual(message._generate_webhook_hmac(message_json), 'fb2b61d55884a648b35801688754924778b3e71ea2bf8f5effb0f3ffecb5c940')
def test_generate_webhook_checksum_hmac_unicode_nonascii(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', '\x80secret')
message_json = message._json_string()
self.assertEqual(message._generate_webhook_hmac(message_json), '2e9e974847184a9f611ed082ba0e76525d1364de520968fbe188737344358d61')
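    # The send() tests below patch urllib2.urlopen, so no real HTTP requests are made.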
def test_send_headers(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
with patch.object(urllib2, 'urlopen') as mock_urlopen:
message.send()
mock_urlopen.assert_called_once()
request = mock_urlopen.call_args_list[0][0][0]
self.assertIsNotNone(request)
self.assertEqual(request.headers, {'X-tba-checksum': 'dbecd85ae53d221c387647085912d2107fa04cd5', 'Content-type': 'application/json; charset="utf-8"', 'X-tba-hmac': 'fb2b61d55884a648b35801688754924778b3e71ea2bf8f5effb0f3ffecb5c940', 'X-tba-version': '1'})
def test_send(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
with patch.object(urllib2, 'urlopen') as mock_urlopen, patch.object(message, 'defer_track_notification') as mock_track:
success = message.send()
mock_urlopen.assert_called_once()
mock_track.assert_called_once_with(1)
self.assertTrue(success)
def test_send_errors(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
for code in [400, 401, 500]:
error_mock = Mock()
error_mock.side_effect = MockHTTPError(code)
with patch.object(urllib2, 'urlopen', error_mock) as mock_urlopen, patch.object(message, 'defer_track_notification') as mock_track:
success = message.send()
mock_urlopen.assert_called_once()
mock_track.assert_not_called()
self.assertTrue(success)
def test_send_error_unknown(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
error_mock = Mock()
error_mock.side_effect = MockHTTPError(-1)
with patch.object(urllib2, 'urlopen', error_mock) as mock_urlopen, patch.object(message, 'defer_track_notification') as mock_track:
success = message.send()
mock_urlopen.assert_called_once()
mock_track.assert_not_called()
self.assertTrue(success)
def test_send_fail_404(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
error_mock = Mock()
error_mock.side_effect = MockHTTPError(404)
with patch.object(urllib2, 'urlopen', error_mock) as mock_urlopen, patch.object(message, 'defer_track_notification') as mock_track:
success = message.send()
mock_urlopen.assert_called_once()
mock_track.assert_not_called()
self.assertFalse(success)
def test_send_fail_url_error(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
error_mock = Mock()
error_mock.side_effect = urllib2.URLError('testing')
with patch.object(urllib2, 'urlopen', error_mock) as mock_urlopen, patch.object(message, 'defer_track_notification') as mock_track:
success = message.send()
mock_urlopen.assert_called_once()
mock_track.assert_not_called()
self.assertFalse(success)
def test_send_fail_deadline_error(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
error_mock = Mock()
error_mock.side_effect = Exception('Deadline exceeded while waiting for HTTP response from URL: https://thebluealliance.com')
with patch.object(urllib2, 'urlopen', error_mock) as mock_urlopen, patch.object(message, 'defer_track_notification') as mock_track:
success = message.send()
mock_urlopen.assert_called_once()
mock_track.assert_not_called()
self.assertTrue(success)
def test_send_error_other(self):
message = WebhookRequest(MockNotification(webhook_message_data={'data': 'value'}), 'https://www.thebluealliance.com', 'secret')
error_mock = Mock()
error_mock.side_effect = Exception('testing')
with patch.object(urllib2, 'urlopen', error_mock) as mock_urlopen, patch.object(message, 'defer_track_notification') as mock_track:
success = message.send()
mock_urlopen.assert_called_once()
mock_track.assert_not_called()
self.assertTrue(success)
| fangeugene/the-blue-alliance | tests/models_tests/notifications/requests/test_webhook_request.py | Python | mit | 7,863 |
import os.path
from edalize.edatool import Edatool
class Trellis(Edatool):
argtypes = ['vlogdefine', 'vlogparam']
@classmethod
def get_doc(cls, api_ver):
if api_ver == 0:
return {'description' : "Project Trellis enables a fully open-source flow for ECP5 FPGAs using Yosys for Verilog synthesis and nextpnr for place and route",
'lists' : [
{'name' : 'nextpnr_options',
'type' : 'String',
'desc' : 'Additional options for nextpnr'},
{'name' : 'yosys_synth_options',
'type' : 'String',
'desc' : 'Additional options for the synth_ecp5 command'},
]}
def configure_main(self):
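        # Build the Yosys synthesis script for the design and render the Makefile that drives nextpnr.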
# Write yosys script file
(src_files, incdirs) = self._get_fileset_files()
with open(os.path.join(self.work_root, self.name+'.ys'), 'w') as yosys_file:
for key, value in self.vlogdefine.items():
yosys_file.write("verilog_defines -D{}={}\n".format(key, self._param_value_str(value)))
yosys_file.write("verilog_defaults -push\n")
yosys_file.write("verilog_defaults -add -defer\n")
if incdirs:
yosys_file.write("verilog_defaults -add {}\n".format(' '.join(['-I'+d for d in incdirs])))
lpf_files = []
for f in src_files:
if f.file_type in ['verilogSource']:
yosys_file.write("read_verilog {}\n".format(f.name))
elif f.file_type in ['systemVerilogSource']:
yosys_file.write("read_verilog -sv {}\n".format(f.name))
elif f.file_type == 'LPF':
lpf_files.append(f.name)
elif f.file_type == 'user':
pass
for key, value in self.vlogparam.items():
_s = "chparam -set {} {} $abstract\{}\n"
yosys_file.write(_s.format(key,
self._param_value_str(value, '"'),
self.toplevel))
yosys_file.write("verilog_defaults -pop\n")
yosys_file.write("synth_ecp5 -nomux")
yosys_synth_options = self.tool_options.get('yosys_synth_options', [])
for option in yosys_synth_options:
yosys_file.write(' ' + option)
yosys_file.write(" -json {}.json".format(self.name))
if self.toplevel:
yosys_file.write(" -top " + self.toplevel)
yosys_file.write("\n")
if not lpf_files:
lpf_files = ['empty.lpf']
with open(os.path.join(self.work_root, lpf_files[0]), 'a'):
os.utime(os.path.join(self.work_root, lpf_files[0]), None)
elif len(lpf_files) > 1:
raise RuntimeError("trellis backend supports only one LPF file. Found {}".format(', '.join(lpf_files)))
# Write Makefile
nextpnr_options = self.tool_options.get('nextpnr_options', [])
template_vars = {
'name' : self.name,
'lpf_file' : lpf_files[0],
'nextpnr_options' : nextpnr_options,
}
self.render_template('trellis-makefile.j2',
'Makefile',
template_vars)
| lowRISC/edalize | edalize/trellis.py | Python | bsd-2-clause | 3,427 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Felix Fontein <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: x509_crl
version_added: "2.10"
short_description: Generate Certificate Revocation Lists (CRLs)
description:
- This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
- Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
or as a path to a certificate file in PEM format.
requirements:
- cryptography >= 1.2
author:
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the CRL file should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
mode:
description:
- Defines how to process entries of existing CRLs.
- If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
as specified in I(revoked_certificates).
- If set to C(update), makes sure that the CRL contains the revoked certificates from
I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
already exists, all entries from the existing CRL will also be included in the new CRL.
When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
type: str
default: generate
choices: [ generate, update ]
force:
description:
- Should the CRL be forced to be regenerated.
type: bool
default: no
backup:
description:
- Create a backup file including a timestamp so you can get the original
CRL back if you overwrote it with a new one by accident.
type: bool
default: no
path:
description:
- Remote absolute path where the generated CRL file should be created or is already located.
type: path
required: yes
privatekey_path:
description:
- Path to the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path).
- This is required if the private key is password protected.
type: str
issuer:
description:
- Key/value pairs that will be present in the issuer name field of the CRL.
- If you need to specify more than one value with the same key, use a list as value.
- Required if I(state) is C(present).
type: dict
last_update:
description:
- The point in time from which this CRL can be trusted.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
              + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
next_update:
description:
- "The absolute latest point in time by which this I(issuer) is expected to have issued
another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
              + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
- Required if I(state) is C(present).
type: str
digest:
description:
- Digest algorithm to be used when signing the CRL.
type: str
default: sha256
revoked_certificates:
description:
- List of certificates to be revoked.
- Required if I(state) is C(present).
type: list
elements: dict
suboptions:
path:
description:
- Path to a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(content) and I(serial_number). One of these three options
must be specified.
type: path
content:
description:
- Content of a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(path) and I(serial_number). One of these three options
must be specified.
type: str
serial_number:
description:
- Serial number of the certificate.
- Mutually exclusive with I(path) and I(content). One of these three options must
be specified.
type: int
revocation_date:
description:
- The point in time the certificate was revoked.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
                      + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
issuer:
description:
- The certificate's issuer.
- "Example: C(DNS:ca.example.org)"
type: list
elements: str
issuer_critical:
description:
- Whether the certificate issuer extension should be critical.
type: bool
default: no
reason:
description:
- The value for the revocation reason extension.
type: str
choices:
- unspecified
- key_compromise
- ca_compromise
- affiliation_changed
- superseded
- cessation_of_operation
- certificate_hold
- privilege_withdrawn
- aa_compromise
- remove_from_crl
reason_critical:
description:
- Whether the revocation reason extension should be critical.
type: bool
default: no
invalidity_date:
description:
- The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
                      + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent. This will NOT
change when I(ignore_timestamps) is set to C(yes).
type: str
invalidity_date_critical:
description:
- Whether the invalidity date extension should be critical.
type: bool
default: no
ignore_timestamps:
description:
- Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
I(invalidity_date) in I(revoked_certificates) will never be ignored.
- Use this in combination with relative timestamps for these values to get idempotency.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) CRL's content as I(crl).
type: bool
default: no
extends_documentation_fragment:
- files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
- Date specified should be UTC. Minutes and seconds are mandatory.
'''
EXAMPLES = r'''
- name: Generate a CRL
x509_crl:
path: /etc/ssl/my-ca.crl
privatekey_path: /etc/ssl/private/my-ca.pem
issuer:
CN: My CA
last_update: "+0s"
next_update: "+7d"
revoked_certificates:
- serial_number: 1234
revocation_date: 20190331202428Z
issuer:
CN: My CA
- serial_number: 2345
revocation_date: 20191013152910Z
reason: affiliation_changed
invalidity_date: 20191001000000Z
- path: /etc/ssl/crt/revoked-cert.pem
revocation_date: 20191010010203Z
'''
RETURN = r'''
filename:
description: Path to the generated CRL
returned: changed or success
type: str
sample: /path/to/my-ca.crl
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/my-ca.crl.2019-03-09@11:22~
privatekey:
description: Path to the private CA key
returned: changed or success
type: str
sample: /path/to/my-ca.pem
issuer:
description:
- The CRL's issuer.
- Note that for repeated values, only the last one will be returned.
returned: success
type: dict
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
issuer_ordered:
description: The CRL's issuer as an ordered list of tuples.
returned: success
type: list
elements: list
sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
last_update:
description: The point in time from which this CRL can be trusted as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
next_update:
description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
digest:
description: The signature algorithm used to sign the CRL.
returned: success
type: str
sample: sha256WithRSAEncryption
revoked_certificates:
description: List of certificates to be revoked.
returned: success
type: list
elements: dict
contains:
serial_number:
description: Serial number of the certificate.
type: int
sample: 1234
revocation_date:
description: The point in time the certificate was revoked as ASN.1 TIME.
type: str
sample: 20190413202428Z
issuer:
description: The certificate's issuer.
type: list
elements: str
sample: '["DNS:ca.example.org"]'
issuer_critical:
description: Whether the certificate issuer extension is critical.
type: bool
sample: no
reason:
description:
- The value for the revocation reason extension.
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
C(remove_from_crl).
type: str
sample: key_compromise
reason_critical:
description: Whether the revocation reason extension is critical.
type: bool
sample: no
invalidity_date:
description: |
The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid as ASN.1 TIME.
type: str
sample: 20190413202428Z
invalidity_date_critical:
description: Whether the invalidity date extension is critical.
type: bool
sample: no
crl:
description: The (current or generated) CRL's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
'''
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import (
CertificateRevocationListBuilder,
RevokedCertificateBuilder,
NameAttribute,
Name,
)
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"
class CRLError(crypto_utils.OpenSSLObjectError):
pass
class CRL(crypto_utils.OpenSSLObject):
def __init__(self, module):
super(CRL, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.update = module.params['mode'] == 'update'
self.ignore_timestamps = module.params['ignore_timestamps']
self.return_content = module.params['return_content']
self.crl_content = None
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.issuer = crypto_utils.parse_name_field(module.params['issuer'])
self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
self.last_update = crypto_utils.get_relative_time_option(module.params['last_update'], 'last_update')
self.next_update = crypto_utils.get_relative_time_option(module.params['next_update'], 'next_update')
self.digest = crypto_utils.select_message_digest(module.params['digest'])
if self.digest is None:
raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
self.revoked_certificates = []
for i, rc in enumerate(module.params['revoked_certificates']):
result = {
'serial_number': None,
'revocation_date': None,
'issuer': None,
'issuer_critical': False,
'reason': None,
'reason_critical': False,
'invalidity_date': None,
'invalidity_date_critical': False,
}
path_prefix = 'revoked_certificates[{0}].'.format(i)
if rc['path'] is not None or rc['content'] is not None:
# Load certificate from file or content
try:
if rc['content'] is not None:
rc['content'] = rc['content'].encode('utf-8')
cert = crypto_utils.load_certificate(rc['path'], content=rc['content'], backend='cryptography')
try:
result['serial_number'] = cert.serial_number
except AttributeError:
# The property was called "serial" before cryptography 1.4
result['serial_number'] = cert.serial
except crypto_utils.OpenSSLObjectError as e:
if rc['content'] is not None:
module.fail_json(
msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
)
else:
module.fail_json(
msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
)
else:
# Specify serial_number (and potentially issuer) directly
result['serial_number'] = rc['serial_number']
# All other options
if rc['issuer']:
result['issuer'] = [crypto_utils.cryptography_get_name(issuer) for issuer in rc['issuer']]
result['issuer_critical'] = rc['issuer_critical']
result['revocation_date'] = crypto_utils.get_relative_time_option(
rc['revocation_date'],
path_prefix + 'revocation_date'
)
if rc['reason']:
result['reason'] = crypto_utils.REVOCATION_REASON_MAP[rc['reason']]
result['reason_critical'] = rc['reason_critical']
if rc['invalidity_date']:
result['invalidity_date'] = crypto_utils.get_relative_time_option(
rc['invalidity_date'],
path_prefix + 'invalidity_date'
)
result['invalidity_date_critical'] = rc['invalidity_date_critical']
self.revoked_certificates.append(result)
self.module = module
self.backup = module.params['backup']
self.backup_file = None
try:
self.privatekey = crypto_utils.load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend='cryptography'
)
except crypto_utils.OpenSSLBadPassphraseError as exc:
raise CRLError(exc)
self.crl = None
try:
with open(self.path, 'rb') as f:
data = f.read()
self.crl = x509.load_pem_x509_crl(data, default_backend())
if self.return_content:
self.crl_content = data
except Exception as dummy:
self.crl_content = None
def remove(self):
if self.backup:
self.backup_file = self.module.backup_local(self.path)
super(CRL, self).remove(self.module)
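    # Normalize a revoked-certificate entry into a comparable tuple; the revocation date is dropped when ignore_timestamps is set.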
def _compress_entry(self, entry):
if self.ignore_timestamps:
# Throw out revocation_date
return (
entry['serial_number'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
else:
return (
entry['serial_number'],
entry['revocation_date'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
def check(self, perms_required=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(CRL, self).check(self.module, perms_required)
if not state_and_perms:
return False
if self.crl is None:
return False
if self.last_update != self.crl.last_update and not self.ignore_timestamps:
return False
if self.next_update != self.crl.next_update and not self.ignore_timestamps:
return False
if self.digest.name != self.crl.signature_hash_algorithm.name:
return False
want_issuer = [(crypto_utils.cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
return False
old_entries = [self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
if self.update:
# We don't simply use a set so that duplicate entries are treated correctly
for entry in new_entries:
try:
old_entries.remove(entry)
except ValueError:
return False
else:
if old_entries != new_entries:
return False
return True
def _generate_crl(self):
backend = default_backend()
crl = CertificateRevocationListBuilder()
try:
crl = crl.issuer_name(Name([
NameAttribute(crypto_utils.cryptography_name_to_oid(entry[0]), to_text(entry[1]))
for entry in self.issuer
]))
except ValueError as e:
raise CRLError(e)
crl = crl.last_update(self.last_update)
crl = crl.next_update(self.next_update)
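        # In update mode, carry over entries from the existing CRL that are not re-specified in revoked_certificates.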
if self.update and self.crl:
new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
for entry in self.crl:
decoded_entry = self._compress_entry(crypto_utils.cryptography_decode_revoked_certificate(entry))
if decoded_entry not in new_entries:
crl = crl.add_revoked_certificate(entry)
for entry in self.revoked_certificates:
revoked_cert = RevokedCertificateBuilder()
revoked_cert = revoked_cert.serial_number(entry['serial_number'])
revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
if entry['issuer'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CertificateIssuer([
                        crypto_utils.cryptography_get_name(name) for name in entry['issuer']
]),
entry['issuer_critical']
)
if entry['reason'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CRLReason(entry['reason']),
entry['reason_critical']
)
if entry['invalidity_date'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.InvalidityDate(entry['invalidity_date']),
entry['invalidity_date_critical']
)
crl = crl.add_revoked_certificate(revoked_cert.build(backend))
self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
return self.crl.public_bytes(Encoding.PEM)
def generate(self):
if not self.check(perms_required=False) or self.force:
result = self._generate_crl()
if self.return_content:
self.crl_content = result
if self.backup:
self.backup_file = self.module.backup_local(self.path)
crypto_utils.write_file(self.module, result)
self.changed = True
file_args = self.module.load_file_common_arguments(self.module.params)
if self.module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def _dump_revoked(self, entry):
return {
'serial_number': entry['serial_number'],
'revocation_date': entry['revocation_date'].strftime(TIMESTAMP_FORMAT),
'issuer':
[crypto_utils.cryptography_decode_name(issuer) for issuer in entry['issuer']]
if entry['issuer'] is not None else None,
'issuer_critical': entry['issuer_critical'],
'reason': crypto_utils.REVOCATION_REASON_MAP_INVERSE.get(entry['reason']) if entry['reason'] is not None else None,
'reason_critical': entry['reason_critical'],
'invalidity_date':
entry['invalidity_date'].strftime(TIMESTAMP_FORMAT)
if entry['invalidity_date'] is not None else None,
'invalidity_date_critical': entry['invalidity_date_critical'],
}
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'last_update': None,
'next_update': None,
'digest': None,
'issuer_ordered': None,
'issuer': None,
'revoked_certificates': [],
}
if self.backup_file:
result['backup_file'] = self.backup_file
if check_mode:
result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
# result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
result['digest'] = self.module.params['digest']
result['issuer_ordered'] = self.issuer
result['issuer'] = {}
for k, v in self.issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for entry in self.revoked_certificates:
result['revoked_certificates'].append(self._dump_revoked(entry))
elif self.crl:
result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
try:
result['digest'] = crypto_utils.cryptography_oid_to_name(self.crl.signature_algorithm_oid)
except AttributeError:
# Older cryptography versions don't have signature_algorithm_oid yet
dotted = crypto_utils._obj2txt(
self.crl._backend._lib,
self.crl._backend._ffi,
self.crl._x509_crl.sig_alg.algorithm
)
oid = x509.oid.ObjectIdentifier(dotted)
result['digest'] = crypto_utils.cryptography_oid_to_name(oid)
issuer = []
for attribute in self.crl.issuer:
issuer.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
result['issuer_ordered'] = issuer
result['issuer'] = {}
for k, v in issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for cert in self.crl:
entry = crypto_utils.cryptography_decode_revoked_certificate(cert)
result['revoked_certificates'].append(self._dump_revoked(entry))
if self.return_content:
result['crl'] = self.crl_content
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
mode=dict(type='str', default='generate', choices=['generate', 'update']),
force=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
path=dict(type='path', required=True),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str'),
privatekey_passphrase=dict(type='str', no_log=True),
issuer=dict(type='dict'),
last_update=dict(type='str', default='+0s'),
next_update=dict(type='str'),
digest=dict(type='str', default='sha256'),
ignore_timestamps=dict(type='bool', default=False),
return_content=dict(type='bool', default=False),
revoked_certificates=dict(
type='list',
elements='dict',
options=dict(
path=dict(type='path'),
content=dict(type='str'),
serial_number=dict(type='int'),
revocation_date=dict(type='str', default='+0s'),
issuer=dict(type='list', elements='str'),
issuer_critical=dict(type='bool', default=False),
reason=dict(
type='str',
choices=[
'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
'superseded', 'cessation_of_operation', 'certificate_hold',
'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
]
),
reason_critical=dict(type='bool', default=False),
invalidity_date=dict(type='str'),
invalidity_date_critical=dict(type='bool', default=False),
),
required_one_of=[['path', 'content', 'serial_number']],
mutually_exclusive=[['path', 'content', 'serial_number']],
),
),
required_if=[
('state', 'present', ['privatekey_path', 'privatekey_content'], True),
('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
supports_check_mode=True,
add_file_common_args=True,
)
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
try:
crl = CRL(module)
if module.params['state'] == 'present':
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = module.params['force'] or not crl.check()
module.exit_json(**result)
crl.generate()
else:
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
crl.remove()
result = crl.dump()
module.exit_json(**result)
except crypto_utils.OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
| azaghal/ansible | test/support/integration/plugins/modules/x509_crl.py | Python | gpl-3.0 | 31,883 |
# create some notes
v = VDict()
v["1254684338981.png"] = 'use 42.242.123.43'
v["1254685582365.png"] = 'use my default login'
# add a new note
popup('Take a screenshot to add a note')
new_image = capture()
new_note = input()
v[new_image] = new_note
# lookup some notes
for i in (1,2):
popup('Take a screenshot to retrieve a note')
query_image = capture()
retrieved_note = v[query_image][0]
popup(retrieved_note)
| ck1125/sikuli | sikuli-ide/sample-scripts/note2.sikuli/note2.py | Python | mit | 421 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class BoolModelOperations(object):
"""BoolModelOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_true(
self, custom_headers=None, raw=False, **operation_config):
"""Get true Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/true'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_true(
self, bool_body, custom_headers=None, raw=False, **operation_config):
"""Set Boolean value true.
:param bool_body:
:type bool_body: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/true'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(bool_body, 'bool')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_false(
self, custom_headers=None, raw=False, **operation_config):
"""Get false Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_false(
self, bool_body, custom_headers=None, raw=False, **operation_config):
"""Set Boolean value false.
:param bool_body:
:type bool_body: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/false'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(bool_body, 'bool')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get null Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get invalid Boolean value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: bool
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsBodyBoolean.models.ErrorException>`
"""
# Construct URL
url = '/bool/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('bool', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| fhoring/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/autorestbooltestservice/operations/bool_model_operations.py | Python | mit | 10,549 |
from datetime import datetime
from flask import request
from flask_restful.reqparse import Argument
from cloudify._compat import text_type
from cloudify import constants as common_constants
from cloudify.workflows import events as common_events, tasks
from cloudify.models_states import ExecutionState
from sqlalchemy.dialects.postgresql import JSON
from manager_rest import manager_exceptions
from manager_rest.rest.rest_utils import (
get_args_and_verify_arguments,
get_json_and_verify_params,
parse_datetime_string,
)
from manager_rest.rest.rest_decorators import (
marshal_with,
paginate,
detach_globals,
)
from manager_rest.storage import (
get_storage_manager,
models,
db,
)
from manager_rest.security.authorization import authorize
from manager_rest.resource_manager import get_resource_manager
from manager_rest.security import SecuredResource
from manager_rest.security.authorization import check_user_action_allowed
from manager_rest.execution_token import current_execution
class Operations(SecuredResource):
@authorize('operations')
@marshal_with(models.Operation)
@paginate
def get(self, _include=None, pagination=None, **kwargs):
args = get_args_and_verify_arguments([
Argument('graph_id', type=text_type, required=False),
Argument('execution_id', type=text_type, required=False),
Argument('state', type=text_type, required=False),
Argument('skip_internal', type=bool, required=False),
])
sm = get_storage_manager()
graph_id = args.get('graph_id')
exc_id = args.get('execution_id')
state = args.get('state')
skip_internal = args.get('skip_internal')
filters = {}
if graph_id and exc_id:
raise manager_exceptions.BadParametersError(
'Pass either graph_id or execution_id, not both')
elif graph_id:
filters['tasks_graph'] = sm.get(models.TasksGraph, graph_id)
elif exc_id:
execution = sm.get(models.Execution, exc_id)
filters['_tasks_graph_fk'] = [
tg._storage_id for tg in execution.tasks_graphs]
else:
raise manager_exceptions.BadParametersError(
'Missing required param: graph_id or execution_id')
if state is not None:
filters['state'] = state
if skip_internal:
filters['type'] = ['SubgraphTask', 'RemoteWorkflowTask']
return sm.list(
models.Operation,
filters=filters,
pagination=pagination,
include=_include,
)
@authorize('operations')
def post(self, **kwargs):
request_dict = get_json_and_verify_params({'action'})
action = request_dict['action']
if action == 'update-stored':
self._update_stored_operations()
return None, 204
def _update_stored_operations(self):
"""Recompute operation inputs, for resumable ops of the given node
For deployment_id's node_id's operation, find stored operations that
weren't finished yet (so can be resumed), and update their inputs
to match the inputs given in the node spec (ie. coming from the plan).
This is useful in deployment-update, so that stored operations that
are resumed after the update, use the already updated values.
"""
deployment_id = request.json['deployment_id']
if not deployment_id:
return None, 204
node_id = request.json['node_id']
op_name = request.json['operation']
sm = get_storage_manager()
with sm.transaction():
dep = sm.get(models.Deployment, deployment_id)
node_id, new_inputs = self._new_operation_details(
sm,
dep,
node_id,
op_name,
rel_index=request.json.get('rel_index'),
key=request.json.get('key'),
)
for op in self._find_resumable_ops(sm, dep, node_id, op_name):
self._update_operation_inputs(sm, op, new_inputs)
def _new_operation_details(self, sm, deployment, node_id, operation_name,
rel_index=None, key=None):
"""Find the node_id and new inputs of the updated operation
Note: the node_id might be different than the one we think we're
updating, because if the operation is a target interface of a
relationship, then we actually want the remote-side node of the rel.
"""
node = sm.list(models.Node,
filters={'deployment': deployment, 'id': node_id})[0]
if rel_index is not None:
rel = node.relationships[rel_index]
if key == 'target_operations':
node_id = rel['target_id']
operation = rel[key].get(operation_name, {})
else:
operation = node.operations.get(operation_name, {})
return node_id, operation.get('inputs')
def _find_resumable_ops(self, sm, deployment, node_id, operation_name):
executions = sm.list(models.Execution, filters={
'deployment': deployment,
'status': [
ExecutionState.PENDING,
ExecutionState.STARTED,
ExecutionState.CANCELLED,
ExecutionState.FAILED
]
}, get_all_results=True)
if not executions:
return
graphs = sm.list(models.TasksGraph, filters={
'execution_id': [e.id for e in executions]
}, get_all_results=True)
def _filter_operation(column):
# path in the parameters dict that stores the node name
node_name_path = ('task_kwargs', 'kwargs',
'__cloudify_context', 'node_name')
# ..and the operation interface name,
# eg. cloudify.interfaces.lifecycle.create
# (NOT eg. script.runner.tasks.run)
operation_name_path = ('task_kwargs', 'kwargs',
'__cloudify_context', 'operation', 'name')
# this will use postgres' json operators
json_column = db.cast(column, JSON)
return db.and_(
json_column[node_name_path].astext == node_id,
json_column[operation_name_path].astext == operation_name
)
return sm.list(models.Operation, filters={
'parameters': _filter_operation,
'_tasks_graph_fk': [tg._storage_id for tg in graphs],
'state': [tasks.TASK_RESCHEDULED,
tasks.TASK_FAILED,
tasks.TASK_PENDING]
}, get_all_results=True)
def _update_operation_inputs(self, sm, operation, new_inputs):
try:
operation.parameters['task_kwargs']['kwargs'].update(new_inputs)
operation.parameters['task_kwargs']['kwargs'][
'__cloudify_context']['has_intrinsic_functions'] = True
except KeyError:
return
sm.update(operation, modified_attrs=['parameters'])
class OperationsId(SecuredResource):
@authorize('operations')
@marshal_with(models.Operation)
def get(self, operation_id, **kwargs):
return get_storage_manager().get(models.Operation, operation_id)
@authorize('operations')
@marshal_with(models.Operation)
def put(self, operation_id, **kwargs):
params = get_json_and_verify_params({
'name': {'type': text_type, 'required': True},
'graph_id': {'type': text_type, 'required': True},
'dependencies': {'type': list, 'required': True},
'parameters': {'type': dict},
'type': {'type': text_type}
})
operation = get_resource_manager().create_operation(
operation_id,
name=params['name'],
graph_id=params['graph_id'],
dependencies=params['dependencies'],
type=params['type'],
parameters=params['parameters']
)
return operation, 201
@authorize('operations', allow_if_execution=True)
@detach_globals
def patch(self, operation_id, **kwargs):
request_dict = get_json_and_verify_params({
'state': {'type': text_type},
'result': {'optional': True},
'exception': {'optional': True},
'exception_causes': {'optional': True},
})
sm = get_storage_manager()
with sm.transaction():
instance = sm.get(models.Operation, operation_id, locking=True)
old_state = instance.state
instance.state = request_dict.get('state', instance.state)
if instance.state == common_constants.TASK_SUCCEEDED:
self._on_task_success(sm, instance)
self._insert_event(
instance,
request_dict.get('result'),
request_dict.get('exception'),
request_dict.get('exception_causes')
)
if not instance.is_nop and \
old_state not in common_constants.TERMINATED_STATES and \
instance.state in common_constants.TERMINATED_STATES:
self._modify_execution_operations_counts(instance, 1)
sm.update(instance, modified_attrs=('state',))
return {}, 200
def _on_task_success(self, sm, operation):
handler = getattr(self, f'_on_success_{operation.type}', None)
if handler:
handler(sm, operation)
def _on_success_SetNodeInstanceStateTask(self, sm, operation):
required_permission = 'node_instance_update'
tenant_name = current_execution.tenant.name
check_user_action_allowed(required_permission,
tenant_name=tenant_name)
try:
kwargs = operation.parameters['task_kwargs']
node_instance_id = kwargs['node_instance_id']
state = kwargs['state']
except KeyError:
return
node_instance = sm.get(models.NodeInstance, node_instance_id)
node_instance.state = state
sm.update(node_instance)
def _on_success_SendNodeEventTask(self, sm, operation):
try:
kwargs = operation.parameters['task_kwargs']
except KeyError:
            return  # task_kwargs is missing, so there is nothing to report
db.session.execute(models.Event.__table__.insert().values(
timestamp=datetime.utcnow(),
reported_timestamp=datetime.utcnow(),
event_type='workflow_node_event',
message=kwargs['event'],
message_code=None,
operation=None,
node_id=kwargs['node_instance_id'],
_execution_fk=current_execution._storage_id,
_tenant_id=current_execution._tenant_id,
_creator_id=current_execution._creator_id,
visibility=current_execution.visibility,
))
def _insert_event(self, operation, result=None, exception=None,
exception_causes=None):
if operation.type not in ('RemoteWorkflowTask', 'SubgraphTask'):
return
if not current_execution:
return
try:
context = operation.parameters['task_kwargs']['kwargs'][
'__cloudify_context']
except (KeyError, TypeError):
context = {}
if exception is not None:
operation.parameters.setdefault('error', str(exception))
current_retries = operation.parameters.get('current_retries') or 0
total_retries = operation.parameters.get('total_retries') or 0
try:
message = common_events.format_event_message(
operation.name,
operation.type,
operation.state,
result,
exception,
current_retries,
total_retries,
)
event_type = common_events.get_event_type(operation.state)
except RuntimeError:
return
db.session.execute(models.Event.__table__.insert().values(
timestamp=datetime.utcnow(),
reported_timestamp=datetime.utcnow(),
event_type=event_type,
message=message,
message_code=None,
operation=context.get('operation', {}).get('name'),
node_id=context.get('node_id'),
source_id=context.get('source_id'),
target_id=context.get('target_id'),
error_causes=exception_causes,
_execution_fk=current_execution._storage_id,
_tenant_id=current_execution._tenant_id,
_creator_id=current_execution._creator_id,
visibility=current_execution.visibility,
))
def _modify_execution_operations_counts(self, operation, finished_delta,
total_delta=0):
"""Increase finished_operations for this operation's execution
This is a separate sql-level update query, rather than ORM-level
calls, for performance: the operation state-update call is on
the critical path for all operations in a workflow; this saves
about 3ms over the ORM approach (which requires fetching the
execution; more if the DB is not local).
"""
exc_table = models.Execution.__table__
tg_table = models.TasksGraph.__table__
values = {}
if finished_delta:
values['finished_operations'] =\
exc_table.c.finished_operations + finished_delta
if total_delta:
values['total_operations'] =\
exc_table.c.total_operations + total_delta
db.session.execute(
exc_table.update()
.where(db.and_(
tg_table.c._execution_fk == exc_table.c._storage_id,
tg_table.c._storage_id == operation._tasks_graph_fk,
))
.values(**values)
)
@authorize('operations')
@marshal_with(models.Operation)
def delete(self, operation_id, **kwargs):
sm = get_storage_manager()
with sm.transaction():
instance = sm.get(models.Operation, operation_id, locking=True)
if not instance.is_nop:
finished_delta = (
-1
if instance.state in common_constants.TERMINATED_STATES
else 0
)
self._modify_execution_operations_counts(
instance, finished_delta, total_delta=-1)
sm.delete(instance)
return instance, 200
class TasksGraphs(SecuredResource):
@authorize('operations')
@marshal_with(models.TasksGraph)
@paginate
def get(self, _include=None, pagination=None, **kwargs):
args = get_args_and_verify_arguments([
Argument('execution_id', type=text_type, required=True),
Argument('name', type=text_type, required=False)
])
sm = get_storage_manager()
execution_id = args.get('execution_id')
name = args.get('name')
execution = sm.get(models.Execution, execution_id)
filters = {'execution': execution}
if name:
filters['name'] = name
return sm.list(
models.TasksGraph,
filters=filters,
pagination=pagination,
include=_include,
)
class TasksGraphsId(SecuredResource):
@authorize('operations')
@marshal_with(models.TasksGraph)
def post(self, **kwargs):
params = get_json_and_verify_params({
'name': {'type': text_type},
'execution_id': {'type': text_type},
'operations': {'optional': True},
'created_at': {'optional': True},
'graph_id': {'optional': True},
})
created_at = params.get('created_at')
operations = params.get('operations', [])
if params.get('graph_id'):
check_user_action_allowed('set_execution_details')
if created_at or any(op.get('created_at') for op in operations):
check_user_action_allowed('set_timestamp')
created_at = parse_datetime_string(params.get('created_at'))
for op in operations:
if op.get('created_at'):
op['created_at'] = parse_datetime_string(op['created_at'])
sm = get_storage_manager()
with sm.transaction():
tasks_graph = get_resource_manager().create_tasks_graph(
name=params['name'],
execution_id=params['execution_id'],
operations=params.get('operations', []),
created_at=created_at,
graph_id=params.get('graph_id')
)
return tasks_graph, 201
@authorize('operations')
@marshal_with(models.TasksGraph)
def patch(self, tasks_graph_id, **kwargs):
request_dict = get_json_and_verify_params(
{'state': {'type': text_type}}
)
sm = get_storage_manager()
instance = sm.get(models.TasksGraph, tasks_graph_id, locking=True)
instance.state = request_dict.get('state', instance.state)
return sm.update(instance)
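if __name__ == '__main__':
    # Illustrative sketch only (running this module directly also needs the
    # manager_rest/flask dependencies imported above). It shows the parameters
    # shape that the JSON-path filters in Operations._find_resumable_ops
    # assume for stored operations; the concrete values here are made up.
    example_parameters = {
        'task_kwargs': {
            'kwargs': {
                '__cloudify_context': {
                    'node_name': 'vm',
                    'operation': {
                        'name': 'cloudify.interfaces.lifecycle.create',
                    },
                },
            },
        },
    }
    node_name_path = ('task_kwargs', 'kwargs',
                      '__cloudify_context', 'node_name')
    value = example_parameters
    for key in node_name_path:
        value = value[key]
    print(value)  # prints: vm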
| cloudify-cosmo/cloudify-manager | rest-service/manager_rest/rest/resources_v3_1/operations.py | Python | apache-2.0 | 17,305 |
# $Id$
from itcc.molecule import relalist
from itcc.tinker import molparam
__revision__ = '$Rev$'
def gettortype(mol, tor):
assert len(tor) == 4
return tuple([mol.atoms[idx].type for idx in tor])
def gettorsbytype(mol, types):
types = [molparam.torsion_uni(type_) for type_ in types]
result = {}
for typ in types:
result[typ] = []
mol.confirmconnect()
tors = relalist.genD(relalist.genconns(mol.connect))
for tor in tors:
typ = molparam.torsion_uni(gettortype(mol, tor))
if typ in types:
result[typ].append(tor)
return result
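# Illustrative note (itcc/Tinker objects assumed, the torsion type is made up):
# for a molecule whose atoms carry Tinker atom types, a call such as
#     gettorsbytype(mol, [(1, 3, 3, 1)])
# returns a dict keyed by the canonicalised torsion type, each value being the
# list of 4-atom index tuples of that type found in the connectivity graph.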
| lidaobing/itcc | itcc/tinker/analyze.py | Python | gpl-3.0 | 608 |
import os
import shutil
import tempfile
import unittest
import contextlib
from .. import archivebot
def touch(path):
with open(path, "wb"):
pass
@contextlib.contextmanager
def tmpdir():
tmp = tempfile.mkdtemp()
try:
yield tmp
finally:
shutil.rmtree(tmp)
class TestUniqueWritableFile(unittest.TestCase):
def test_should_create_a_file_without_a_sequence_number_when_no_files_exist(self):
with tmpdir() as tmp:
with archivebot._unique_writable_file(tmp, "filename", ".ext") as (filename, _):
self.assertEqual(filename, os.path.join(tmp, "filename.ext"))
def test_should_pick_the_next_sequential_number_when_files_exist(self):
with tmpdir() as tmp:
touch(os.path.join(tmp, "filename.ext"))
touch(os.path.join(tmp, "filename-00000001.ext"))
touch(os.path.join(tmp, "filename-00000002.ext"))
with archivebot._unique_writable_file(tmp, "filename", ".ext") as (filename, _):
self.assertEqual(filename, os.path.join(tmp, "filename-00000003.ext"))
def test_should_raise_OSError_other_than_EEXIST(self):
def try_to_create(directory):
with archivebot._unique_writable_file(directory, "filename", ".ext"):
pass
with tmpdir() as tmp:
non_existing_dir = os.path.join(tmp, "non-existing-dir")
self.assertRaises(OSError, try_to_create, non_existing_dir)
class TestRename(unittest.TestCase):
def test_should_not_go_outside_the_original_directory(self):
with tmpdir() as directory:
tmp_file = tempfile.NamedTemporaryFile(dir=directory, delete=False)
new_name = archivebot._rename(tmp_file.name)
self.assertEqual(os.path.abspath(os.path.dirname(new_name)), os.path.abspath(directory))
def test_should_move_the_original_file(self):
with tmpdir() as directory:
tmp_file = tempfile.NamedTemporaryFile(dir=directory, delete=False)
new_name = archivebot._rename(tmp_file.name)
with open(new_name, "rb") as new_file:
self.assertTrue(os.path.sameopenfile(new_file.fileno(), tmp_file.fileno()))
def test_should_not_keep_the_original_file(self):
with tmpdir() as directory:
tmp_file = tempfile.NamedTemporaryFile(dir=directory, delete=False)
archivebot._rename(tmp_file.name)
self.assertFalse(os.path.isfile(tmp_file.name))
def test_should_change_the_file_name(self):
with tmpdir() as directory:
tmp_file = tempfile.NamedTemporaryFile(dir=directory, delete=False)
new_name = archivebot._rename(tmp_file.name)
self.assertNotEqual(os.path.abspath(tmp_file.name), os.path.abspath(new_name))
def test_should_create_a_filename_with_the_dotcompress_suffix(self):
with tmpdir() as directory:
tmp_file = tempfile.NamedTemporaryFile(dir=directory, delete=False)
new_name = archivebot._rename(tmp_file.name)
self.assertTrue(".compress" in os.path.basename(new_name))
class TestCompress(unittest.TestCase):
def test_dotcompress(self):
with tempfile.NamedTemporaryFile(prefix="roomname.compress@example") as tmp:
self.assertRaises(ValueError, archivebot._compress, tmp.name)
def test_valid_compress(self):
with tmpdir() as directory:
tmp = tempfile.NamedTemporaryFile(suffix=".compress-00000000", dir=directory, delete=False)
tmp.write("test")
gz_file = archivebot._compress(tmp.name)
try:
self.assertEqual(gz_file, tmp.name[:-18] + ".gz")
finally:
os.remove(gz_file)
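# These tests follow the standard unittest conventions; given the package
# layout implied by the relative import above, they can be run with, e.g.,
#     python -m unittest abusehelper.bots.archivebot.tests.test_archivebot
# (assuming the abusehelper package is importable).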
| abusesa/abusehelper | abusehelper/bots/archivebot/tests/test_archivebot.py | Python | mit | 3,759 |
#!/usr/bin/python
import os, sys
from pygenlib2 import *
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# TOKEN
TOKEN = '''
pragma - @keyword @stmt
namespace - @keyword @stmt
script - @keyword @stmt
include - @keyword @stmt
import - @keyword @stmt
using - @keyword @stmt
class - @keyword @stmt
method - @stmt
format - @stmt
return - @keyword @stmt
yeild - @keyword @stmt
if - @keyword @stmt
switch - @keyword @stmt
case - @keyword @stmt
while - @keyword @stmt
do - @keyword @stmt
for - @keyword @stmt
foreach - @keyword @stmt
break - @keyword @stmt
continue - @keyword @stmt
goto - @keyword @stmt
try - @keyword @stmt
catch - @keyword @stmt
throw - @keyword @stmt
lsend - @stmt
print - @keyword @stmt
assert - @keyword @stmt
checkin - @keyword @stmt
register - @keyword @stmt
select - @keyword @stmt
go - @keyword @stmt
### using
#alias - @keyword
#unit
#enum
#vocab
### class
extends - @keyword
implements - @keyword
### if, switch
else - @keyword
default - @keyword
### if
finally - @keyword
# @array TT_BRACE TT_DOT
... TT_DOTS @top @op
!! TT_EXPT
, TT_COMMA @P0
; TT_SEMICOLON @top
_ TT_ASIS @system @P99
# in expr
function - @keyword @stmt
block - @stmt
new - @keyword
null - @keyword
void - @keyword
var - @keyword
true - @keyword
false - @keyword
byte - @keyword
typeof - @keyword
{ TT_BRACE @top
( TT_PARENTHESIS @top
[ TT_BRANCET @top
. TT_DOT @top
: TT_COLON @P99
-> TT_ARROW @P99
=> TT_FUNCARROW @P99
<<= TT_LSFTE
>>= TT_RSFTE
+= TT_ADDE
-= TT_SUBE
/= TT_DIVE
%= TT_MODE
*= TT_MULE
&= TT_LANDE
|= TT_LORE
?\?= TT_ALTLET
'''
OP = '''
= TT_LET @opLET @P3
<< TT_SEND @opSEND @P2
?\? TT_ALT @opALT @op @P4
? TT_QUESTION @opQ @P4
or TT_OR @opOR @P5
and TT_AND @opAND @P6
not TT_NOT @opNOT @P7 @A1
exists TT_EXISTS @P8 @opEXISTS
#begin TT_isBINARY
is? TT_IS @opIS @P9 @A2
instanceof TT_OF @opOF @P9 @A2
isa? TT_ISA @opISA @P9 @A2
in? TT_IN @opHAS @P9 @A2
##=~ TT_MATCH @opCASE @A2 @P9
== TT_EQ @opEQ @A2 @P9
!= TT_NEQ @opNOTEQ @A2 @P9
< TT_LT @opLT @A2 @P9
<= TT_LTE @opLTE @A2 @P9
> TT_GT @opGT @A2 @P9
>= TT_GTE @opGTE @A2 @P9
from - @opFROM @A2 @top @P15
where - @opWHERE @A2 @P15
to - @opTO @A2 @P15
until - @opUNTIL @A2 @P15
<<< TT_LSFT @opLSFT @P15 @A2
>> TT_RSFT @opRSFT @P15 @A2
+ TT_ADD @opADD @P20
- TT_SUB @opSUB @P20 @A2
/ TT_DIV @opDIV @P24 @A2
% TT_MOD @opMOD @P24 @A2
* TT_MUL @opMUL @P24 @A2
& TT_LAND @opLAND @P26
| TT_LOR @opLOR @P27
^ TT_XOR @opLXOR @P27
#end TT_isBINARY
~ TT_LNOT @opLNOT @A1 @top @P28
++ TT_NEXT @opNEXT @A1 @top @P28
-- TT_PREV @opPREV @A1 @top @P28
.. TT_ITR @opITR @A1 @top @P28
*** TT_TMUL @opEXPAND @A1 @top @P28
+++ TT_TADD @opPLUS @A1 @top @P28
&&& TT_ADDR @opADDR @A1 @top @P28
--- TT_TSUB @opNEG @A1 @top @P28
'''
ALIAS = '''
public @Public
private @Private
final @Final
virtual @Virtual
static @Static
mod %
|| or
&& and
! not
=== is
char Int
short Int
int Int
long Int
float Float
double Float
boolean Boolean
string String
def var
ArrayList Array
Integer Int
TRUE true
FALSE false
NULL null
True true
False false
'''
EXPR = '''
done - @stmt
decl - @stmt
letm - @stmt
pushm - @stmt
cmd - @stmt
extra - @stmt
err - @stmt
# @expr: let - call1
let - @stmt @expr
new - @stmt @expr
tcast - @stmt @expr
qcast - @stmt @expr
func - @stmt @expr
call - @stmt @expr
pcall - @stmt @expr
mt - @stmt @expr
fmt - @stmt @expr
op - @stmt @expr
tri - @stmt @expr
alt - @stmt @expr
and - @stmt @expr @keyword
or - @stmt @expr @keyword
w - @stmt
call1 - @stmt @expr
'''
TOKENT = '''
NUM -
STR -
TSTR -
ESTR -
FMTSTR -
REGEX -
DOC -
METAN -
PROPN -
URN -
QPATH -
NAME -
UNAME -
FUNCNAME -
TYPE -
CID -
MN -
FMT -
MT -
#Typed Token
CONST -
STMT -
SYSVAL -
LOCAL -
STACK -
FIELD -
ERR -
EOT -
'''
#------------------------------------------------------------------------------
TOKENMAP = {}
TT_LIST = []
DTT_LIST = []
OP_LIST = []
MN_LIST = []
STT_LIST = []
DSTT_LIST = []
ALIASMAP = {}
def get_op(tokens):
for t in tokens:
if t.startswith('@op'):
return t.replace('@op', 'MN_op')
return '/*ERROR*/'
def get_p(tokens):
for t in tokens:
if t.startswith('@P'):
return t.replace('@P', '')
return '100'
def keyword(tokens, TT):
for t in tokens:
if t.startswith('@keyword'):
return 'TT_KEYWORD(%s)' % TT
return TT
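# Illustrative note on the tag format parsed by the helpers above: an OP table
# entry such as
#     + TT_ADD @opADD @P20
# is split on whitespace; get_op() maps "@opADD" to the C name "MN_opADD",
# get_p() maps "@P20" to the priority string "20" (defaulting to "100"), and
# keyword() wraps a token id in TT_KEYWORD(...) only if "@keyword" is present.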
def readData():
for line in TOKEN.split('\n'):
if line == '' or line.startswith('#'): continue
t = line.split()
if not len(t) > 1: continue
name = t[0]
TT = t[1]
if TT == '-' : TT = 'TT_%s' % name.upper()
TOKENMAP[name] = keyword(t, TT)
TT_LIST.append(TT)
DTT_LIST.append('"%s", /* %s */' % (name, TT))
if '@stmt' in line:
STT_LIST.append(TT.replace('TT_', 'STT_'))
DSTT_LIST.append('"%s", /* S%s */' % (name, TT))
for line in TOKENT.split('\n'):
if line == '' or line.startswith('#'): continue
t = line.split()
if not len(t) > 1: continue
name = t[0]
TT = t[1]
if TT == '-' : TT = 'TT_%s' % name.upper()
TT_LIST.append(TT)
DTT_LIST.append('"%s", /* %s */' % (TT, TT))
for line in OP.split('\n'):
if line == '' or line.startswith('#'): continue
t = line.split()
if not len(t) > 1: continue
name = t[0]
TT = t[1]
if TT == '-' : TT = 'TT_%s' % name.upper()
TOKENMAP[name] = keyword(t, TT)
TT_LIST.append(TT)
DTT_LIST.append('"%s", /* %s */' % (name, TT))
MN_LIST.append(get_op(t))
OP_LIST.append('"%s", %s /* %s */' % (name, get_p(t), TT))
for line in EXPR.split('\n'):
if line == '' or line.startswith('#'): continue
t = line.split()
if not len(t) > 1: continue
name = t[0]
TT = t[1]
if TT == '-' : TT = 'STT_%s' % name.upper()
STT_LIST.append(TT)
DSTT_LIST.append('"%s", /* %s */' % (TT, TT))
for line in ALIAS.split('\n'):
if line == '' or line.startswith('#'): continue
t = line.split()
if not len(t) > 1: continue
name = t[0]
TT = t[1]
ALIASMAP[name] = TT
def write_file(f):
f.write('''
#ifndef GAMMA_MACRO
#define GAMMA_MACRO 1
''')
write_chapter(f, 'TT_MACRO')
n = 0
for tt in TT_LIST:
write_define(f, tt, '((knh_term_t)%d)' % n, 40)
n += 1
write_define(f, 'K_TOKEN_MAXSIZ', '((knh_term_t)%d)' % n, 40)
write_line(f)
#n = 0
for stt in STT_LIST:
write_define(f, stt, '((knh_term_t)%d)' % n, 40)
n += 1
write_define(f, 'K_STMT_MAXSIZ', '((knh_term_t)%d)' % n, 40)
write_line(f)
n = 0
for mn in MN_LIST:
write_define(f, mn, '((knh_methodn_t)%d)' % n, 40)
n += 1
write_define(f, 'MN_OPSIZE', '((knh_methodn_t)%d)' % n, 40)
write_line(f)
f.write('''
#endif/*GAMMA_MACRO*/
#ifdef K_USING_LOADDATA
typedef struct {
const char *name;
knh_uintptr_t tt;
} knh_TokenData_t;
static knh_TokenData_t knh_TokenData[] = {''')
for n in TOKENMAP.keys():
f.write('''
{"%s", %s}, ''' % (n, TOKENMAP[n]))
f.write('''
{NULL, 0}
};
static void knh_loadTokenData(Ctx *ctx)
{
knh_TokenData_t *data = knh_TokenData;
knh_DictSet_t *ds = DP(ctx->sys)->tokenDictSet;
while(data->name != NULL) {
knh_DictSet_set(ctx, ds, new_T(data->name), data->tt);
data++;
}
}
typedef struct {
const char *name;
const char *alias;
} knh_AliasData_t;
static knh_AliasData_t knh_AliasData[] = {''')
for n in ALIASMAP.keys():
f.write('''
{"%s", "%s"}, ''' % (n, ALIASMAP[n]))
f.write('''
{NULL, NULL}
};
static void knh_loadAliasTokenData(Ctx *ctx)
{
knh_AliasData_t *data = knh_AliasData;
knh_DictMap_t *dm = new_DictMap0(ctx, 0);
KNH_INITv(DP(ctx->share->mainns)->aliasDictMapNULL, dm);
while(data->name != NULL) {
knh_DictMap_set(ctx, dm, new_T(data->name), UPCAST(new_T(data->alias)));
data++;
}
}
#endif/*K_USING_LOADDATA*/
#ifdef K_USING_TERMCODE
/***********************************************
static knh_term_t knh_bytes_parseToken(Ctx *ctx, knh_bytes_t t)
{
knh_uintptr_t n = knh_DictSet_get__b(DP(ctx->sys)->tokenDictSet, t);
if(n > 0) {
knh_term_t tt = (knh_term_t)(n);
return TT_UNMASK(tt);
}
return TT_EOT;
}
static int knh_bytes_isKeyword(Ctx *ctx, knh_bytes_t t)
{
knh_uintptr_t n = knh_DictSet_get__b(DP(ctx->sys)->tokenDictSet, t);
if(n > 0) {
knh_term_t tt = (knh_term_t)(n);
return TT_ISKEYWORD(tt);
}
return TT_EOT;
}
********************************************/
#ifndef K_USING_SMALLFOOTPRINT
static const char* knh_tokens[] = {''')
for tt in DTT_LIST:
f.write('''
%s''' % (tt))
f.write('''
NULL
};
#endif/*SMALLFOOTPRINT*/
#ifndef K_USING_SMALLFOOTPRINT
static const char* knh_stmts[] = {''')
for stt in DSTT_LIST:
f.write('''
%s''' % (stt))
f.write('''
NULL
};
#endif/*SMALLFOOTPRINT*/
const char *TT__(knh_term_t tt)
{
if(tt < K_TOKEN_MAXSIZ) {
#ifdef K_USING_SMALLFOOTPRINT
return "TT";
#else
return knh_tokens[tt];
#endif
}
else {
#ifdef K_USING_SMALLFOOTPRINT
return "STT";
#else
return knh_stmts[tt - K_TOKEN_MAXSIZ];
#endif
}
}
typedef struct {
const char *opname;
int priority;
} knh_opdata_t;
static knh_opdata_t knh_opdata[] = {''')
for op in OP_LIST:
f.write('''
{%s}, ''' % (op))
f.write('''
{NULL, 0},
};
static int knh_term_getOPRpriority(knh_term_t tt)
{
if(TT_LET <= tt && tt <= TT_TSUB) {
return knh_opdata[tt-TT_LET].priority;
}
if(TT_ASIS <= tt && tt < TT_ERR) {
return 99;
}
return 0;
}
const char* knh_getopname(knh_methodn_t mn)
{
DBG_ASSERT(mn + TT_LET <= TT_TSUB);
return knh_opdata[mn].opname;
}
#endif/*K_USING_TERMCODE*/
''')
def gen_stmt(bdir):
f = open('include/konoha/konoha_grammar_.h', 'w')
f.write('''// THIS FILE WAS AUTOMATICALLY GENERATED
''')
readData()
write_file(f)
f.close()
#------------------------------------------------------------------------------
if __name__ == '__main__':
bdir = '..'
readData()
write_file(sys.stdout)
#gen_stmt(bdir)
| imasahiro/konoha-jit-deprecated | gentools/gen_gamma.py | Python | lgpl-3.0 | 10,949 |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import shutil
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", "black", *LOCAL_DEPS)
session.run(
"black",
"--check",
"google",
"tests",
"docs",
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
This currently uses Python 3.6 due to the automated Kokoro run of synthtool.
    That run uses an image that doesn't have 3.6 installed. Before updating this,
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install("black")
session.run(
"black",
"google",
"tests",
"docs",
)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=97",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "../test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python='3.6')
def docs(session):
"""Build the docs."""
session.install('sphinx', 'alabaster', 'recommonmark')
session.install('-e', '.')
shutil.rmtree(os.path.join('docs', '_build'), ignore_errors=True)
session.run(
'sphinx-build',
'-W', # warnings as errors
'-T', # show full traceback on exception
'-N', # no colors
'-b', 'html',
'-d', os.path.join('docs', '_build', 'doctrees', ''),
os.path.join('docs', ''),
os.path.join('docs', '_build', 'html', ''),
)
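# Usage sketch (assumes nox is installed): `nox -l` lists the sessions defined
# above, `nox -s lint` runs a single session, and sessions parametrized over
# interpreters are addressed by name and version, e.g. `nox -s unit-3.7` or
# `nox -s system-2.7`.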
| tseaver/google-cloud-python | webrisk/noxfile.py | Python | apache-2.0 | 4,916 |
import numpy as np
a = {}
for i in np.arange(100000):
a[i] = pow(i/100., 10)
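# For comparison, the same values can be computed without a Python-level loop
# (kept as a comment so the script above remains a pure pow() benchmark):
#     a_vec = np.power(np.arange(100000) / 100., 10)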
| guangtunbenzhu/BGT-Cosmology | Test/PowOrPower/test_pow.py | Python | mit | 82 |
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(
X,
n_support,
remaining_iterations=30,
initial_estimates=None,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None,
):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int
Number of observations to compute the robust estimates of location
and covariance from. This parameter must be greater than
`n_samples / 2`.
remaining_iterations : int, default=30
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : tuple of shape (2,), default=None
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : bool, default=False
Verbose mode.
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location estimates.
covariance : ndarray of shape (n_features, n_features)
Robust covariance estimates.
support : ndarray of shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(
X,
n_support,
remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates,
verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
def _c_step(
X,
n_support,
random_state,
remaining_iterations=30,
initial_estimates=None,
verbose=False,
cov_computation_method=empirical_covariance,
):
n_samples, n_features = X.shape
dist = np.inf
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
# If the data already has singular covariance, calculate the precision,
# as the loop below will not be entered.
if np.isinf(det):
precision = linalg.pinvh(covariance)
previous_det = np.inf
while det < previous_det and remaining_iterations > 0 and not np.isinf(det):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Check if best fit already found (det => 0, logdet => -inf)
if np.isinf(det):
results = location, covariance, det, support, dist
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print(
"Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations)
)
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn(
"Determinant has increased; this should not happen: "
"log(det) > log(previous_det) (%.15f > %.15f). "
"You may want to try with a higher value of "
"support_fraction (current value: %.3f)."
% (det, previous_det, n_support / n_samples),
RuntimeWarning,
)
results = (
previous_location,
previous_covariance,
previous_det,
previous_support,
previous_dist,
)
# Check early stopping
if remaining_iterations == 0:
if verbose:
print("Maximum number of iterations reached")
results = location, covariance, det, support, dist
return results
def select_candidates(
X,
n_support,
n_trials,
select=1,
n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None,
):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[RV]_.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int
The number of samples the pure data set must contain.
This parameter must be in the range `[(n + p + 1)/2] < n_support < n`.
n_trials : int or tuple of shape (2,)
Number of different initial sets of observations from which to
run the algorithm. This parameter should be a strictly positive
integer.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
select : int, default=1
Number of best candidates results to return. This parameter must be
a strictly positive integer.
n_iter : int, default=30
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
This parameter must be a strictly positive integer.
verbose : bool, default=False
Control the output verbosity.
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
See Also
    --------
c_step
Returns
-------
best_locations : ndarray of shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : ndarray of shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : ndarray of shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [RV] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError(
"Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)"
% (n_trials, type(n_trials))
)
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X,
n_support,
remaining_iterations=n_iter,
verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
)
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(
_c_step(
X,
n_support,
remaining_iterations=n_iter,
initial_estimates=initial_estimates,
verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
)
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = zip(
*all_estimates
)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(
X,
support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None,
):
"""Estimate the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is `None`, which implies that the minimum
value of `support_fraction` will be used within the algorithm:
`(n_sample + n_features + 1) / 2`. This parameter must be in the
range (0, 1).
cov_computation_method : callable, \
default=:func:`sklearn.covariance.empirical_covariance`
The function which will be used to compute the covariance.
Must return an array of shape (n_features, n_features).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Returns
-------
location : ndarray of shape (n_features,)
Robust location of the data.
covariance : ndarray of shape (n_features, n_features)
Robust covariance of the features.
support : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = (
0.5
* (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
)
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
except MemoryError:
# The above is too big. Let's try with something much small
# (and less optimal)
n_best_tot = 10
all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset,
h_subset,
n_trials,
select=n_best_sub,
n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = select_candidates(
X[selection],
h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = select_candidates(
X,
n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X,
n_support,
n_trials=n_trials,
select=n_best,
n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X,
n_support,
n_trials=(locations_best, covariances_best),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state,
)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
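# Usage sketch for the helper above (illustrative only): given a data matrix X
# of shape (n_samples, n_features),
#     location, covariance, support, dist = fast_mcd(X, random_state=0)
# returns the raw robust location/covariance estimates, the boolean support
# mask and the squared Mahalanobis distances; the MinCovDet estimator below
# wraps this and additionally applies the correction and re-weighting steps.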
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`(n_sample + n_features + 1) / 2`. The parameter must be in the range
(0, 1).
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
raw_location_ : ndarray of shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : ndarray of shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : ndarray of shape (n_features,)
Estimated robust location.
covariance_ : ndarray of shape (n_features, n_features)
Estimated robust covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : ndarray of shape (n_samples,)
Mahalanobis distances of the training set (on which :meth:`fit` is
called) observations.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
EllipticEnvelope : An object for detecting outliers in
a Gaussian distributed dataset.
EmpiricalCovariance : Maximum likelihood covariance estimator.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
GraphicalLassoCV : Sparse inverse covariance with cross-validated
choice of the l1 penalty.
LedoitWolf : LedoitWolf Estimator.
OAS : Oracle Approximating Shrinkage Estimator.
ShrunkCovariance : Covariance estimator with shrinkage.
References
----------
.. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.
.. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
.. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import MinCovDet
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = MinCovDet(random_state=0).fit(X)
>>> cov.covariance_
array([[0.7411..., 0.2535...],
[0.2535..., 0.3053...]])
>>> cov.location_
array([0.0813... , 0.0427...])
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(
self,
*,
store_precision=True,
assume_centered=False,
support_fraction=None,
random_state=None,
):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fit a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = self._validate_data(X, ensure_min_samples=2, estimator="MinCovDet")
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn(
"The covariance matrix associated to your dataset is not full rank"
)
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X,
support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state,
)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(
X[raw_support], assume_centered=True
)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [RVD]_.
Parameters
----------
data : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : ndarray of shape (n_features, n_features)
Corrected robust covariance estimate.
References
----------
.. [RVD] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
"""
# Check that the covariance of the support data is not equal to 0.
# Otherwise self.dist_ = 0 and thus correction = 0.
n_samples = len(self.dist_)
n_support = np.sum(self.support_)
if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
raise ValueError(
"The covariance matrix of the support data "
"is equal to 0, try to increase support_fraction"
)
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates) described
in [RVDriessen]_.
Parameters
----------
data : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : ndarray of shape (n_features,)
Re-weighted robust location estimate.
covariance_reweighted : ndarray of shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : ndarray of shape (n_samples,), dtype=bool
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
References
----------
.. [RVDriessen] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered
)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
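# Illustrative usage sketch (not part of scikit-learn itself); the toy data
# below is an assumption made only for demonstration purposes:
#
# import numpy as np
# from sklearn.covariance import MinCovDet
# rng = np.random.RandomState(0)
# X = rng.multivariate_normal([0., 0.], [[1., .3], [.3, 1.]], size=500)
# mcd = MinCovDet(random_state=0).fit(X)
# mcd.covariance_ # re-weighted robust covariance (set by fit)
# mcd.correct_covariance(X) # consistency-corrected raw MCD estimate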
| manhhomienbienthuy/scikit-learn | sklearn/covariance/_robust_covariance.py | Python | bsd-3-clause | 33,495 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import shutil
import sys
import zipfile
from telemetry import decorators
from telemetry.core import browser_finder
from telemetry.core import command_line
from telemetry.core import util
from telemetry.page import page_runner
from telemetry.page import cloud_storage
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.page import test_expectations
from telemetry.results import page_test_results
Disabled = decorators.Disabled
Enabled = decorators.Enabled
class Benchmark(command_line.Command):
"""Base class for a Telemetry benchmark.
A test packages a PageTest/PageMeasurement and a PageSet together.
"""
options = {}
@classmethod
def Name(cls):
name = cls.__module__.split('.')[-1]
if hasattr(cls, 'tag'):
name += '.' + cls.tag
if hasattr(cls, 'page_set'):
name += '.' + cls.page_set.Name()
return name
@classmethod
def AddCommandLineArgs(cls, parser):
cls.PageTestClass().AddCommandLineArgs(parser)
if hasattr(cls, 'AddTestCommandLineArgs'):
group = optparse.OptionGroup(parser, '%s test options' % cls.Name())
cls.AddTestCommandLineArgs(group)
parser.add_option_group(group)
@classmethod
def SetArgumentDefaults(cls, parser):
cls.PageTestClass().SetArgumentDefaults(parser)
parser.set_defaults(**cls.options)
@classmethod
def ProcessCommandLineArgs(cls, parser, args):
cls.PageTestClass().ProcessCommandLineArgs(parser, args)
def CustomizeBrowserOptions(self, options):
"""Add browser options that are required by this benchmark."""
def Run(self, args):
"""Run this test with the given options."""
self.CustomizeBrowserOptions(args.browser_options)
test = self.PageTestClass()()
test.__name__ = self.__class__.__name__
if hasattr(self, '_disabled_strings'):
test._disabled_strings = self._disabled_strings
if hasattr(self, '_enabled_strings'):
test._enabled_strings = self._enabled_strings
ps = self.CreatePageSet(args)
expectations = self.CreateExpectations(ps)
self._DownloadGeneratedProfileArchive(args)
results = page_test_results.PageTestResults()
try:
results = page_runner.Run(test, ps, expectations, args)
except page_test.TestNotSupportedOnPlatformFailure as failure:
logging.warning(str(failure))
results.PrintSummary()
return len(results.failures) + len(results.errors)
def _DownloadGeneratedProfileArchive(self, options):
"""Download and extract profile directory archive if one exists."""
archive_name = getattr(self, 'generated_profile_archive', None)
# If attribute not specified, nothing to do.
if not archive_name:
return
# If profile dir specified on command line, nothing to do.
if options.browser_options.profile_dir:
      logging.warning("Profile directory specified on command line: %s, this "
"overrides the benchmark's default profile directory.",
options.browser_options.profile_dir)
return
# Download profile directory from cloud storage.
found_browser = browser_finder.FindBrowser(options)
test_data_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', 'perf',
'generated_profiles',
found_browser.target_os)
generated_profile_archive_path = os.path.normpath(
os.path.join(test_data_dir, archive_name))
try:
cloud_storage.GetIfChanged(generated_profile_archive_path,
cloud_storage.PUBLIC_BUCKET)
except (cloud_storage.CredentialsError,
cloud_storage.PermissionError) as e:
if os.path.exists(generated_profile_archive_path):
# If the profile directory archive exists, assume the user has their
        # own local copy and simply warn.
logging.warning('Could not download Profile archive: %s',
generated_profile_archive_path)
else:
# If the archive profile directory doesn't exist, this is fatal.
logging.error('Can not run without required profile archive: %s. '
'If you believe you have credentials, follow the '
'instructions below.',
generated_profile_archive_path)
logging.error(e)
sys.exit(-1)
# Unzip profile directory.
extracted_profile_dir_path = (
os.path.splitext(generated_profile_archive_path)[0])
if not os.path.isfile(generated_profile_archive_path):
raise Exception("Profile directory archive not downloaded: ",
generated_profile_archive_path)
with zipfile.ZipFile(generated_profile_archive_path) as f:
try:
f.extractall(os.path.dirname(generated_profile_archive_path))
      except Exception as e:
# Cleanup any leftovers from unzipping.
if os.path.exists(extracted_profile_dir_path):
shutil.rmtree(extracted_profile_dir_path)
logging.error("Error extracting profile directory zip file: %s", e)
sys.exit(-1)
# Run with freshly extracted profile directory.
logging.info("Using profile archive directory: %s",
extracted_profile_dir_path)
options.browser_options.profile_dir = extracted_profile_dir_path
@classmethod
def PageTestClass(cls):
"""Get the PageTest for this Benchmark.
If the Benchmark has no PageTest, raises NotImplementedError.
"""
if not hasattr(cls, 'test'):
raise NotImplementedError('This test has no "test" attribute.')
if not issubclass(cls.test, page_test.PageTest):
raise TypeError('"%s" is not a PageTest.' % cls.test.__name__)
return cls.test
@classmethod
def PageSetClass(cls):
"""Get the PageSet for this Benchmark.
If the Benchmark has no PageSet, raises NotImplementedError.
"""
if not hasattr(cls, 'page_set'):
raise NotImplementedError('This test has no "page_set" attribute.')
if not issubclass(cls.page_set, page_set.PageSet):
raise TypeError('"%s" is not a PageSet.' % cls.page_set.__name__)
return cls.page_set
@classmethod
def CreatePageSet(cls, options): # pylint: disable=W0613
"""Get the page set this test will run on.
By default, it will create a page set from the file at this test's
page_set attribute. Override to generate a custom page set.
"""
return cls.PageSetClass()()
@classmethod
def CreateExpectations(cls, ps): # pylint: disable=W0613
"""Get the expectations this test will run with.
By default, it will create an empty expectations set. Override to generate
custom expectations.
"""
if hasattr(cls, 'expectations'):
return cls.expectations
else:
return test_expectations.TestExpectations()
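# Illustrative sketch (hypothetical module and class names): a concrete
# benchmark wires a PageTest and a PageSet together via class attributes:
#
# class MyBenchmark(Benchmark):
#   test = my_measurement.MyMeasurement   # a page_test.PageTest subclass
#   page_set = my_page_set.MyPageSet      # a page_set.PageSet subclass
#
# Name() is then derived from the module and page set names, and Run() drives
# page_runner over the page set with the wired-up test.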
def AddCommandLineArgs(parser):
page_runner.AddCommandLineArgs(parser)
def ProcessCommandLineArgs(parser, args):
page_runner.ProcessCommandLineArgs(parser, args)
| chromium2014/src | tools/telemetry/telemetry/benchmark.py | Python | bsd-3-clause | 7,029 |
#!/usr/bin/env python3
from __future__ import print_function
import sys
import glob
import os
import re
exit_code = 0
for dir in ("test", "examples"):
cpp = set([os.path.basename(x) for x in glob.glob(dir + "/*.cpp")])
for build_file in ("Jamfile", "CMakeLists.txt"):
filename = os.path.join(dir, build_file)
if not os.path.exists(filename): continue
run = set(re.findall("([a-zA-Z0-9_]+\.cpp)", open(filename).read()))
diff = cpp - run
if diff:
print("NOT TESTED in %s\n " % filename +
"\n ".join(["%s/%s" % (dir, x) for x in diff]))
exit_code = 1
sys.exit(exit_code)
| Simran-B/arangodb | 3rdParty/boost/1.71.0/libs/histogram/tools/build_check.py | Python | apache-2.0 | 667 |
import asyncore
import unittest
import select
import os
import socket
import threading
import sys
import time
import errno
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink
from StringIO import StringIO
HOST = test_support.HOST
class dummysocket:
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
class dummychannel:
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy:
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace('\n', ''))
if '\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
if hasattr(select, 'poll'):
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ('read', 'expt', 'write', 'closed', 'error_handled')
expected = (
(select.POLLIN, 'read'),
(select.POLLPRI, 'expt'),
(select.POLLOUT, 'write'),
(select.POLLERR, 'closed'),
(select.POLLHUP, 'closed'),
(select.POLLNVAL, 'closed'),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
def handle_error(self):
self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr==expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
else:
self.fail("Expected exception")
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
self.assertEqual(function, 'test_compact_traceback')
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEquals(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEquals(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
d.handle_accept()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event',
'warning: unhandled accept event']
self.assertEquals(lines, expected)
def test_issue_8594(self):
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
try:
d.foo
except AttributeError, err:
self.assertTrue('dispatcher instance' in str(err))
else:
self.fail("exception not raised")
# test cheap inheritance with the underlying socket
self.assertEqual(d.family, socket.AF_INET)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue("unknown error" in err.lower())
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
usepoll = False
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_send(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = test_support.bind_port(self.sock)
cap = StringIO()
args = (self.evt, cap, self.sock)
threading.Thread(target=capture_server, args=args).start()
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = "Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect((HOST, self.port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send('\n')
n = 1000
while d.out_buffer and n > 0:
asyncore.poll()
n -= 1
self.evt.wait()
self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
usepoll = True
if hasattr(asyncore, 'file_wrapper'):
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = "It's not dead, it's sleeping!"
file(TESTFN, 'w').write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), "It's not dead")
self.assertEqual(w.read(6), ", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = "Come again?"
d2 = "I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
self.assertEqual(file(TESTFN).read(), self.d + d1 + d2)
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
s = FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual("".join(data), self.d)
def test_main():
tests = [HelperFunctionTests, DispatcherTests, DispatcherWithSendTests,
DispatcherWithSendTests_UsePoll]
if hasattr(asyncore, 'file_wrapper'):
tests.append(FileWrapperTest)
run_unittest(*tests)
if __name__ == "__main__":
test_main()
| mgadi/naemonbox | sources/psdash/gevent-1.0.1/greentest/2.6/test_asyncore.py | Python | gpl-2.0 | 13,222 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# account_analytic_rename.py
# account_analytic_rename
# First author: Mag Guevara <[email protected]> (ClearCorp S.A.)
# Copyright (c) 2011-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
{
    "name" : "Accounting Analytics rename",
    "author" : "ClearCorp S.A.",
    "version" : "1.0",
    "depends" : ["base","account"],
    "init_xml" : [],
    "update_xml" : ["account_analytic_rename_view.xml"],
    "category" : "Accounting",
    "active" : False,
    "installable" : True,
}
| ClearCorp-dev/odoo-clearcorp | TODO-6.1/account_analytic_rename/__openerp__.py | Python | agpl-3.0 | 2,295 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A simple wrapper for Django templates.
The main purpose of this module is to hide all of the package import pain
you normally have to go through to get Django to work. We expose the Django
Template and Context classes from this module, handling the import nonsense
on behalf of clients.
Typical usage:
from google.appengine.ext.webapp import template
print template.render('templates/index.html', {'foo': 'bar'})
Django uses a global setting for the directory in which it looks for templates.
This is not natural in the context of the webapp module, so our load method
takes in a complete template path, and we set these settings on the fly
automatically. Because we have to set and use a global setting on every
method call, this module is not thread safe, though that is not an issue
for applications.
Django template documentation is available at:
http://www.djangoproject.com/documentation/templates/
"""
import md5
import os
try:
from django import v0_96
except ImportError:
pass
import django
import django.conf
try:
django.conf.settings.configure(
DEBUG=False,
TEMPLATE_DEBUG=False,
TEMPLATE_LOADERS=(
'django.template.loaders.filesystem.load_template_source',
),
)
except (EnvironmentError, RuntimeError):
pass
import django.template
import django.template.loader
from google.appengine.ext import webapp
def render(template_path, template_dict, debug=False):
"""Renders the template at the given path with the given dict of values.
Example usage:
render("templates/index.html", {"name": "Bret", "values": [1, 2, 3]})
Args:
template_path: path to a Django template
template_dict: dictionary of values to apply to the template
"""
t = load(template_path, debug)
return t.render(Context(template_dict))
template_cache = {}
def load(path, debug=False):
"""Loads the Django template from the given path.
It is better to use this function than to construct a Template using the
class below because Django requires you to load the template with a method
if you want imports and extends to work in the template.
"""
abspath = os.path.abspath(path)
if not debug:
template = template_cache.get(abspath, None)
else:
template = None
if not template:
directory, file_name = os.path.split(abspath)
new_settings = {
'TEMPLATE_DIRS': (directory,),
'TEMPLATE_DEBUG': debug,
'DEBUG': debug,
}
old_settings = _swap_settings(new_settings)
try:
template = django.template.loader.get_template(file_name)
finally:
_swap_settings(old_settings)
if not debug:
template_cache[abspath] = template
def wrap_render(context, orig_render=template.render):
URLNode = django.template.defaulttags.URLNode
save_urlnode_render = URLNode.render
old_settings = _swap_settings(new_settings)
try:
URLNode.render = _urlnode_render_replacement
return orig_render(context)
finally:
_swap_settings(old_settings)
URLNode.render = save_urlnode_render
template.render = wrap_render
return template
def _swap_settings(new):
"""Swap in selected Django settings, returning old settings.
Example:
save = _swap_settings({'X': 1, 'Y': 2})
try:
...new settings for X and Y are in effect here...
finally:
_swap_settings(save)
Args:
new: A dict containing settings to change; the keys should
      be setting names and the values the new setting values.
Returns:
    Another dict structured the same way as the argument containing
the original settings. Original settings that were not set at all
are returned as None, and will be restored as None by the
'finally' clause in the example above. This shouldn't matter; we
can't delete settings that are given as None, since None is also a
legitimate value for some settings. Creating a separate flag value
for 'unset' settings seems overkill as there is no known use case.
"""
settings = django.conf.settings
old = {}
for key, value in new.iteritems():
old[key] = getattr(settings, key, None)
setattr(settings, key, value)
return old
def create_template_register():
"""Used to extend the Django template library with custom filters and tags.
To extend the template library with a custom filter module, create a Python
module, and create a module-level variable named "register", and register
all custom filters to it as described at
http://www.djangoproject.com/documentation/templates_python/
#extending-the-template-system:
templatefilters.py
==================
register = webapp.template.create_template_register()
def cut(value, arg):
return value.replace(arg, '')
register.filter(cut)
Then, register the custom template module with the register_template_library
function below in your application module:
myapp.py
========
webapp.template.register_template_library('templatefilters')
"""
return django.template.Library()
def register_template_library(package_name):
"""Registers a template extension module to make it usable in templates.
See the documentation for create_template_register for more information."""
if not django.template.libraries.get(package_name, None):
django.template.add_to_builtins(package_name)
Template = django.template.Template
Context = django.template.Context
def _urlnode_render_replacement(self, context):
"""Replacement for django's {% url %} block.
This version uses WSGIApplication's url mapping to create urls.
Examples:
<a href="{% url MyPageHandler "overview" %}">
{% url MyPageHandler implicit_args=False %}
{% url MyPageHandler "calendar" %}
{% url MyPageHandler "jsmith","calendar" %}
"""
args = [arg.resolve(context) for arg in self.args]
try:
app = webapp.WSGIApplication.active_instance
handler = app.get_registered_handler_by_name(self.view_name)
return handler.get_url(implicit_args=True, *args)
except webapp.NoUrlFoundError:
return ''
| MatthewWilkes/mw4068-packaging | thirdparty/google_appengine/google/appengine/ext/webapp/template.py | Python | apache-2.0 | 6,655 |
#!/usr/bin/env python
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
#Disable warning by SSL certificate
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import wget
#Libraries to export results
import xlsxwriter
import json
from urlparse import urlparse
from bs4 import BeautifulSoup
import optparse
#Analyze metadata pdf
import PyPDF2
from PyPDF2 import PdfFileReader
#Analyze metadata docx
import docx
import datetime
#Parser arguments
import argparse
from argparse import RawTextHelpFormatter
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
#define global vars
dork=["site:","-site:","filetype:","intitle:","intext:"]
urls = []
urls_clean = []
urls_final =[]
delete_bing=["microsoft","msn","bing"]
option = 0
metadata_files=[]
meta_author_array = []
meta_creator_array = []
meta_producer_array = []
####### FUNCTION CREATE A DORK ######
#********************************************************#
#Define and design the dork
def DesignDork( num,file_ext):
iteration=0
initial=1
count_bing=9
try:
while (iteration < num):
#WAITING A DORK IN BING
iteration = iteration +1
if initial==1:
print "\nSearching possible leak information...\n"
initial = 0
#First search in Bing
SearchBing = "https://www.bing.com/search?q="+dork[0]+target+" ("+dork[2]+"pdf+OR+"+dork[2]+"doc)&go=Buscar"
else:
#Bring the next Bing results - 50 in each page
SearchBing=SearchBing + "&first="+str(count_bing)+"&FORM=PORE"
count_bing=count_bing+50
SendRequest(SearchBing)
except:
pass
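# Illustrative example (assuming target = "example.com"): the first iteration
# sends a query such as
#   https://www.bing.com/search?q=site:example.com (filetype:pdf+OR+filetype:doc)&go=Buscar
# and later iterations append &first=<offset>&FORM=PORE to page through the results.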
#********************************************************#
####### FUNCTION SEND REQUEST TO BING ######
#Doing the request to search
def SendRequest(dork):
    #Verify if the resource is available by HTTP or HTTPS
    try:
        #Request with a timeout to check that the resource is available;
        #verify=False ignores the SSL certificate
        response=requests.get(dork,allow_redirects=True, timeout=10,verify=False)
    except requests.exceptions.Timeout:
        print "\nError: timeout requesting " + dork
        return
    except requests.exceptions.RequestException as e:
        print "\nError connecting to the server: " + str(e)
        return
    content = response.text
#PARSER HTML
#normalize a called with parameters
parser_html(file_ext,content)
#********************************************************#
####### FUNCTION PARSER HTML ######
#Parse the HTML response and collect the result links
def parser_html(type,content):
i = 0
soup = BeautifulSoup(content, 'html.parser')
for link in soup.find_all('a'):
try:
if (urlparse(link.get('href'))!='' and urlparse(link.get('href'))[1].strip()!=''):
#if file_ext == 1: -> Display the domains where the files are found.
if type == 1:
urls.append(urlparse(link.get('href'))[1]) #domain
else: # file_ext == 2 -> ofimatic files: pdf, doc,docx,xls,....
urls.append(link.get('href'))
except Exception as e:
#print(e)
pass
try:
#Delete duplicates
[urls_clean.append(i) for i in urls if not i in urls_clean]
except:
pass
try:
#Delete not domains belongs to target
for value in urls_clean:
if (value.find(delete_bing[0]) == -1):
if (value.find(delete_bing[1]) == -1):
if (value.find(delete_bing[2]) == -1):
urls_final.append(value)
except:
pass
####### FUNCTION DOWNLOADFILES ######
def ExportResults(data):
with open ('output.json','w') as f:
json.dump(data,f)
####### FUNCTION AnalyzeMetadata pdf ######
def Analyze_Metadata_pdf(filename):
####### FUNCTION AnalyzeMetadata ######
pdfFile = PdfFileReader(file(filename, 'rb'))
metadata = pdfFile.getDocumentInfo()
print ' - Document: ' + str(filename)
for meta in metadata:
value=(metadata[meta])
print ' - ' + meta + ':' + metadata[meta]
if meta == "/Author":
if value not in meta_author_array:
meta_author_array.append(value)
elif meta =="/Producer":
if value not in meta_producer_array:
meta_producer_array.append(value)
elif meta == "/Creator":
if value not in meta_creator_array:
meta_creator_array.append(value)
#Group the different arrays in one with all metadata
metadata_files.append(meta_author_array)
metadata_files.append(meta_producer_array)
metadata_files.append(meta_creator_array)
####### FUNCTION AnalyzeMetadata doc ######
def Analyze_Metadata_doc(fileName):
#Open file
docxFile = docx.Document(file(fileName,'rb'))
#Get the structure
docxInfo= docxFile.core_properties
#Print the metadata which it wants to display
attribute = ["author", "category", "comments", "content_status",
"created", "identifier", "keywords", "language",
"last_modified_by", "last_printed", "modified",
"revision", "subject", "title", "version"]
#run the list in a for loop to print the value of each metadata
print ' - Document: ' + str(fileName)
for meta in attribute:
        metadata = getattr(docxInfo,meta)
        value = metadata
        if metadata:
            #docx core properties use plain attribute names, not PDF-style keys
            if meta == "author":
                if value not in meta_author_array:
                    meta_author_array.append(value)
            elif meta == "last_modified_by":
                if value not in meta_creator_array:
                    meta_creator_array.append(value)
#Separate the values unicode and time date
if isinstance(metadata, unicode):
print " \n\t" + str(meta)+": " + str(metadata)
elif isinstance(metadata, datetime.datetime):
print " \n\t" + str(meta)+": " + str(metadata)
####### FUNCTION CATEGORY FILE TO EXTRACT METADATA ######
def Analyze_Metadata(filename):
#Verify the ext to know the type of the file to diference of the analysis
ext=filename.lower().rsplit(".",1)[-1]
if ext =="pdf":
#call the function analyze metadata pdf
Analyze_Metadata_pdf(filename)
elif ((ext =="doc") or (ext=="docx")):
Analyze_Metadata_doc(filename)
else:
        print "\nMetadata cannot be extracted from this file type. Skipping to the next file!\n"
####### FUNCTION DOWNLOADFILES ######
def Display_Export_Metadata(data,output):
try:
print "-----------------------------------------------"
print "METADATA RESULTS BY CATEGORY"
print "\n################################################\n"
print "Users - Documents Author"
for user in data[0]:
print " " + str(user).encode('utf8')
print "\n##################################################\n"
print "Producer"
#print "Producer"+ str(data[1])
for producer in data[1]:
print "\t " + str(producer).encode('utf8')
print "\n################################################\n"
#print "Creator"+ str(data[2])
print "Creator"
for creator in data[2]:
print " " + str(creator).encode('utf8')
print "\n################################################\n"
print "-----------------------------------------------"
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
#json
if (output == 1):
            print "Exporting the results to metadata.json"
with open("metadata.json", 'w') as f:
json.dump(data, f)
#excel
if (output ==2):
            print "Exporting the results to an Excel file"
            # Create a workbook and add a worksheet.
            workbook = xlsxwriter.Workbook('metadata.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write(row, col, "Users")
worksheet.write(row, col+1, "Producer")
worksheet.write(row, col+2, "Creator")
row+=1
# Iterate over the data and write it out row by row.
for users in meta_author_array:
col = 0
worksheet.write(row, col, users)
row += 1
#update row
row=1
for producer in meta_producer_array:
col = 1
worksheet.write(row, col, producer)
row += 1
#update row
row=1
for creator in meta_creator_array:
col = 2
worksheet.write(row, col, creator)
row += 1
#close the excel
workbook.close()
except Exception as e:
print str(e)
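# Note (added for clarity): `data` is the module-level metadata_files list,
# i.e. [ [authors...], [producers...], [creators...] ], so the exported
# metadata.json / metadata.xlsx contain one list (or column) per category.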
####### FUNCTION DOWNLOADFILES ######
def Downloadfiles(urls_metadata,output):
try:
        print "\nWould you like to download these files to analyze their metadata (Y/N)?"
        #Convert the input to lower case
        resp = raw_input().lower()
        if (resp == 'n'):
            print "Exiting"
            exit(1)
        if ((resp != 'y') and (resp != 'n')):
            print "The option is not valid. Please try again."
        if (resp =='y'):
            print "Indicate the directory where the downloaded files should be saved:",
path = raw_input()
try:
for url in urls_metadata:
try:
filename = wget.download(url,path)
Analyze_Metadata(filename)
except:
pass
Display_Export_Metadata(metadata_files,output)
except:
pass
except Exception as e:
print str(e)
#********************************************************#
#Display the results, and optionally export them and download the files
def ShowResults(newlist,num_files,target,output):
print "Files in the target "+target+" are:\n"
print "Files indexed:", len (urls_final)
for i in urls_final:
if i not in newlist:
newlist.append(i)
print i
    #Export the list of indexed URLs when the json format (1) was selected
    if output == 1:
        ExportResults(newlist)
#Call to function to download the files
Downloadfiles(newlist,output)
#MAIN
parser = argparse.ArgumentParser(description="This script searches files indexed by search engines for a domain to detect possible information leaks", formatter_class=RawTextHelpFormatter)
parser.add_argument('-d','--domain', help="The domain to search",required=False)
parser.add_argument('-n','--search', help="Number of search iterations to perform",required=True)
parser.add_argument('-e','--ext', help="Display option:\n\t1-Search the domains where these files are found\n\t2-Search office files (pdf, doc, docx, ...)\n\n", required=True)
parser.add_argument('-f','--export', help="Format used to export the results.\n\t1.json (by default)\n\t2.xlsx",required=False)
args = parser.parse_args()
print " _____ _ _ _ "
print " | __ \ | | | | | | "
print" | |__) |__ _ ___| |_| | ___ __ _| | __"
print" | _ // _` / __| __| | / _ \/ _` | |/ /"
print" | | \ \ (_| \__ \ |_| |___| __/ (_| | < "
print" |_| \_\__,_|___/\__|______\___|\__,_|_|\_\""
print "\n"
print """** Tool to automate the search for leaked information using Bing hacking
** Version 1.4
** Author: Ignacio Brihuega Rodriguez a.k.a N4xh4ck5
** DISCLAIMER: This tool was developed for educational purposes.
** The author is not responsible for its use for any other purpose.
** Great power carries great responsibility!"""
num_files=0
N = int (args.search)
target=args.domain
file_ext= int(args.ext)
#--export is optional; default to json (1) when it is not provided
output = int(args.export) if args.export else 1
if ((output != 1) and (output != 2)):
    print "The export format is not valid"
    exit(1)
#Call design the dork
try:
num_files = DesignDork(N,file_ext)
except:
pass
newlist=[]
#Called the function to display the results
ShowResults(newlist,num_files,target,output) | n4xh4ck5/RastLeak | old/rastleak_1_4.py | Python | gpl-3.0 | 11,317 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Cyril Sester, Emanuel Cino
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, fields, models
from openerp.osv import orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from datetime import datetime
from .product import GIFT_NAMES, SPONSORSHIP_CATEGORY
import logging
import time
logger = logging.getLogger(__name__)
class contract_group(models.Model):
_inherit = 'recurring.contract.group'
##########################################################################
# FIELDS #
##########################################################################
contains_sponsorship = fields.Boolean(
string='Contains sponsorship', compute='_contains_sponsorship',
readonly=True, default=lambda self: 'S' in self.env.context.get(
'default_type', 'O'))
##########################################################################
# FIELDS METHODS #
##########################################################################
@api.one
def _contains_sponsorship(self):
types = self.mapped('contract_ids.type')
self.contains_sponsorship = 'S' in types or 'SC' in types
##########################################################################
# PUBLIC METHODS #
##########################################################################
def generate_invoices(self, invoicer=None):
""" Add birthday gifts generation. """
invoicer = self._generate_birthday_gifts(invoicer)
invoicer = super(contract_group, self).generate_invoices(invoicer)
return invoicer
##########################################################################
# PRIVATE METHODS #
##########################################################################
@api.multi
def _generate_birthday_gifts(self, invoicer=None):
""" Creates the annual birthday gift for sponsorships that
have set the option for automatic birthday gift creation. """
logger.info("Automatic Birthday Gift Generation Started.")
if invoicer is None:
invoicer = self.env['recurring.invoicer'].with_context(
lang='en_US').create({'source': self._name})
self.env.context = self.with_context(
{'lang': 'en_US',
'recurring_invoicer_id': invoicer.id}).env.context
# Search active Sponsorships with automatic birthday gift
gen_states = self._get_gen_states()
contract_search = [('birthday_invoice', '>', 0.0),
('state', 'in', gen_states)]
if self.ids:
contract_search.append(('group_id', 'in', self.ids))
contract_obj = self.env['recurring.contract']
contract_ids = contract_obj.search(contract_search).ids
# Exclude sponsorship if a gift is already open
invl_obj = self.env['account.invoice.line']
product_id = self.env['product.product'].search(
[('name', '=', GIFT_NAMES[0])])[0].id
for con_id in contract_ids:
invl_ids = invl_obj.search([
('state', '=', 'open'),
('contract_id', '=', con_id),
('product_id', '=', product_id)])
if invl_ids:
contract_ids.remove(con_id)
if contract_ids:
total = str(len(contract_ids))
count = 1
logger.info("Found {0} Birthday Gifts to generate.".format(total))
gift_wizard = self.env['generate.gift.wizard'].create({
'description': _('Automatic birthday gift'),
'invoice_date': datetime.today().strftime(DF),
'product_id': product_id,
'amount': 0.0})
# Generate invoices
for contract in contract_obj.browse(contract_ids):
logger.info("Birthday Gift Generation: {0}/{1} ".format(
str(count), total))
gift_wizard.write({
'amount': contract.birthday_invoice})
gift_wizard.with_context(
active_ids=contract.id).generate_invoice()
count += 1
gift_wizard.unlink()
logger.info("Automatic Birthday Gift Generation Finished !!")
return invoicer
@api.multi
def _setup_inv_line_data(self, contract_line, invoice):
""" Contract gifts relate their invoice lines to sponsorship,
Correspondence sponsorships don't create invoice lines.
Add analytic account to invoice_lines.
"""
invl_data = False
contract = contract_line.contract_id
if contract.type != 'SC':
invl_data = super(contract_group, self)._setup_inv_line_data(
contract_line, invoice)
# If project is suspended, either skip invoice or replace product
if contract.type == 'S' and not \
contract.child_id.project_id.disburse_funds:
config_obj = self.env['ir.config_parameter']
suspend_config_id = config_obj.search([(
'key', '=',
'sponsorship_compassion.suspend_product_id')])
if not suspend_config_id:
return False
current_product = self.env['product.product'].with_context(
lang='en_US').browse(invl_data['product_id'])
if current_product.categ_name == SPONSORSHIP_CATEGORY:
invl_data.update(self.env[
'recurring.contract'].get_suspend_invl_data(
suspend_config_id.id))
if contract.type == 'G':
sponsorship = contract_line.sponsorship_id
if sponsorship.state in self._get_gen_states():
invl_data['contract_id'] = sponsorship.id
else:
raise orm.except_orm(
_('Invoice generation error'),
_('No active sponsorship found for child {0}. '
'The gift contract with id {1} is not valid.')
.format(sponsorship.child_code, str(contract.id)))
product_id = contract_line.product_id.id
partner_id = contract_line.contract_id.partner_id.id
analytic = self.env['account.analytic.default'].account_get(
product_id, partner_id, time.strftime('%Y-%m-%d'))
if analytic and analytic.analytic_id:
invl_data.update({
'account_analytic_id': analytic.analytic_id.id})
return invl_data
| ndtran/compassion-modules | sponsorship_compassion/model/contract_group.py | Python | agpl-3.0 | 7,426 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
## ##
## Copyright 2010-2012, Neil Wallace <[email protected]> ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###############################################################################
from PyQt4 import QtCore, QtGui
from lib_openmolar.common.qt4.dialogs import BaseDialog
class ExtendableDialog(BaseDialog):
'''
builds on BaseDialog, adding an area for advanced options
unlike BaseDialog.. this dialog has no spacer item by default
'''
def __init__(self, parent=None, remove_stretch=True):
BaseDialog.__init__(self, parent, remove_stretch)
self.button_box.setCenterButtons(False)
icon = QtGui.QIcon.fromTheme("go-down")
#: a pointer to the Advanced button
self.more_but = QtGui.QPushButton(icon, "&Advanced")
self.more_but.setFlat(True)
self.more_but.setCheckable(True)
self.more_but.setFocusPolicy(QtCore.Qt.NoFocus)
self.button_box.addButton(self.more_but, self.button_box.HelpRole)
self.setOrientation(QtCore.Qt.Vertical)
frame = QtGui.QFrame(self)
layout = QtGui.QVBoxLayout(frame)
self.setExtension(frame)
def set_advanced_but_text(self, txt):
self.more_but.setText(txt)
def _clicked(self, but):
'''
overwrite :doc:`BaseDialog` _clicked
checking to see if addvanced panel is to be displayed.
'''
if but == self.more_but:
self.showExtension(but.isChecked())
return
BaseDialog._clicked(self, but)
def add_advanced_widget(self, widg):
self.extension().layout().addWidget(widg)
if __name__ == "__main__":
app = QtGui.QApplication([])
dl = ExtendableDialog()
label = QtGui.QLabel("Test")
dl.insertWidget(label)
cb = QtGui.QCheckBox("advanced option")
dl.add_advanced_widget(cb)
dl.exec_()
app.closeAllWindows()
| rowinggolfer/openmolar2 | src/lib_openmolar/common/qt4/dialogs/extendable_dialog.py | Python | gpl-3.0 | 3,182 |
import unittest
import shlex
import re
from pykickstart.parser import KickstartParser
from pykickstart.sections import NullSection
from pykickstart.version import makeVersion
VALID_KICKSTART_OPTION_PATTERN = r"--[a-z0-9\-]+"
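# Illustrative examples (not used by the test itself): "--bootproto" matches
# the pattern above, while an option containing uppercase letters such as
# "--useMD5" would not fullmatch it.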
class ArgumentNamesStatic_TestCase(unittest.TestCase):
def setUp(self):
self._bad_options = []
self._reg_name_checker = re.compile(VALID_KICKSTART_OPTION_PATTERN)
self._handler = makeVersion()
def runTest(self):
self._check_commands()
self._check_sections()
self._report()
def _check_sections(self):
tmp_parser = KickstartParser(self._handler)
# KickstartParser registers internally all sections, so it is possible to get the names and
# instances from there. However, it handles some known sections implemented outside
# pykickstart by using NullSection instead of the actual class.
for section, instance in tmp_parser._sections.items():
if not isinstance(instance, NullSection):
arg_parser = instance._getParser()
self._check_parser_actions(arg_parser, section)
def _check_commands(self):
for command, cmdClass in self._handler.commandMap.items():
if command == "method":
continue
args = shlex.split(command, comments=True)
cmd = args[0]
ks_parser = self._handler.commands[cmd]
ks_parser.currentLine = command
ks_parser.currentCmd = args[0]
ks_parser.seen = True
arg_parser = ks_parser._getParser()
self._check_parser_actions(arg_parser, command, cmd_class=cmdClass)
def _check_parser_actions(self, arg_parser, cmd_name, cmd_class=None):
for action in arg_parser._get_optional_actions():
if action.deprecated:
continue
found_any_good = False
for option in action.option_strings:
if cmd_class and option.lstrip("-") in cmd_class.removedAttrs:
# caution removedAttrs does not include leading dashes
continue
is_good = self._reg_name_checker.fullmatch(option) is not None
found_any_good = found_any_good or is_good
if not is_good:
print("Found option with uppercase letters: %s for command %s." % (option, cmd_name))
if not found_any_good:
self._bad_options.append(tuple((cmd_name, action.option_strings)))
def _report(self):
if self._bad_options:
print("The following kickstart option sets do not include a lowercase only variant:")
for option_set in self._bad_options:
print("%s: %s" % (option_set[0], ", ".join(option_set[1])))
self.fail("Some options use uppercase letters and do not have a lowercase-only alias.")
| bcl/pykickstart | tests/args_names.py | Python | gpl-2.0 | 2,924 |
import io
import os
import msvcrt
import signal
import sys
from . import context
from . import spawn
from . import reduction
from . import util
from .compat import _winapi
__all__ = ['Popen']
#
#
#
TERMINATE = 0x10000
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
'''
Start a subprocess to run the code of a process object
'''
method = 'spawn'
def __init__(self, process_obj):
os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1"
spawn._Django_old_layout_hack__save()
prep_data = spawn.get_preparation_data(process_obj._name)
# read end of pipe will be "stolen" by the child process
# -- see spawn_main() in spawn.py.
rhandle, whandle = _winapi.CreatePipe(None, 0)
wfd = msvcrt.open_osfhandle(whandle, 0)
cmd = spawn.get_command_line(parent_pid=os.getpid(),
pipe_handle=rhandle)
cmd = ' '.join('"%s"' % x for x in cmd)
with io.open(wfd, 'wb', closefd=True) as to_child:
# start process
try:
hp, ht, pid, tid = _winapi.CreateProcess(
spawn.get_executable(), cmd,
None, None, False, 0, None, None, None)
_winapi.CloseHandle(ht)
except:
_winapi.CloseHandle(rhandle)
raise
# set attributes of self
self.pid = pid
self.returncode = None
self._handle = hp
self.sentinel = int(hp)
util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))
# send information to child
context.set_spawning_popen(self)
try:
reduction.dump(prep_data, to_child)
reduction.dump(process_obj, to_child)
finally:
context.set_spawning_popen(None)
def duplicate_for_child(self, handle):
assert self is context.get_spawning_popen()
return reduction.duplicate(handle, self.sentinel)
def wait(self, timeout=None):
if self.returncode is None:
if timeout is None:
msecs = _winapi.INFINITE
else:
msecs = max(0, int(timeout * 1000 + 0.5))
res = _winapi.WaitForSingleObject(int(self._handle), msecs)
if res == _winapi.WAIT_OBJECT_0:
code = _winapi.GetExitCodeProcess(self._handle)
if code == TERMINATE:
code = -signal.SIGTERM
self.returncode = code
return self.returncode
def poll(self):
return self.wait(timeout=0)
def terminate(self):
if self.returncode is None:
try:
_winapi.TerminateProcess(int(self._handle), TERMINATE)
except OSError:
if self.wait(timeout=1.0) is None:
raise
| flaviogrossi/billiard | billiard/popen_spawn_win32.py | Python | bsd-3-clause | 3,132 |
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.7"
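# Typical usage (illustrative sketch; the cache directory name is an
# arbitrary assumption):
#
#   import httplib2
#   h = httplib2.Http(".cache")
#   resp, content = h.request("http://example.org/", "GET")
#   print resp.status, len(content)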
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except (ImportError, AttributeError):
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if disable_validation:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
# We should be specifying SSL version 3 or TLS v1, but the ssl module
# doesn't expose the necessary knobs. So we need to go with the default
# of SSLv23.
return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
ssl_SSLError = None
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if not disable_validation:
raise CertificateValidationUnsupported(
"SSL certificate validation is not supported without "
"the ssl module installed. To avoid this error, install "
"the ssl module, or explicity disable validation.")
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = [
'Http', 'Response', 'ProxyInfo', 'HttpLib2Error', 'RedirectMissingLocation',
'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError',
'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# A request will be tried 'RETRIES' times if it fails at the socket/connection level.
RETRIES = 2
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
def __init__(self, desc, host, cert):
HttpLib2Error.__init__(self, desc)
self.host = host
self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
try:
# Users can optionally provide a module that tells us where the CA_CERTS
# are located.
import ca_certs_locater
CA_CERTS = ca_certs_locater.get()
except ImportError:
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
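# Example (illustrative):
#   parse_uri("http://example.com/a/b?x=1#frag")
#   -> ('http', 'example.com', '/a/b', 'x=1', 'frag')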
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
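# Illustrative use of urlnorm (a sketch; example.com is a placeholder host):
#   urlnorm("HTTP://Example.COM/a?b=1")
#   -> ('http', 'example.com', '/a?b=1', 'http://example.com/a?b=1')
# Relative URIs raise RelativeURIError because scheme and authority are required.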
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
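# Illustrative parse (a sketch): directives with values keep their string value,
# bare directives map to 1:
#   _parse_cache_control({'cache-control': 'max-age=3600, no-cache'})
#   -> {'max-age': '3600', 'no-cache': 1}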
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
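# Illustrative parse (a sketch; the realm string is a made-up placeholder):
#   _parse_www_authenticate({'www-authenticate': 'Basic realm="Protected Area"'})
#   -> {'basic': {'realm': 'Protected Area'}}
# Each auth scheme advertised by the server gets its own parameter dict.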
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
We will never return a stale document as
fresh as a design decision, and thus the non-implementation
of 'max-stale'. This also lets us safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
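# Illustrative outcomes of _entry_disposition (a sketch):
#   - response carries a current Date plus 'cache-control: max-age=300'  -> "FRESH"
#   - request carries 'cache-control: no-cache' (or Pragma: no-cache)    -> "TRANSPARENT"
#   - no usable freshness information on the response                    -> "STALE"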
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-ride this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Over-ride this in sub-classes if necessary.
        Return TRUE if the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (
self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)))
headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'])
if self.challenge.get('opaque'):
headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class AllHosts(object):
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
bypass_hosts = ()
def __init__(self, proxy_type, proxy_host, proxy_port,
proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type = proxy_type
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_rdns = proxy_rdns
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port,
self.proxy_rdns, self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
def applies_to(self, hostname):
return not self.bypass_host(hostname)
def bypass_host(self, hostname):
"""Has this host been excluded from the proxy config"""
if self.bypass_hosts is AllHosts:
return True
bypass = False
for domain in self.bypass_hosts:
if hostname.endswith(domain):
bypass = True
return bypass
def proxy_info_from_environment(method='http'):
"""
Read proxy info from the environment variables.
"""
if method not in ['http', 'https']:
return
env_var = method + '_proxy'
url = os.environ.get(env_var, os.environ.get(env_var.upper()))
if not url:
return
pi = proxy_info_from_url(url, method)
no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
bypass_hosts = []
if no_proxy:
bypass_hosts = no_proxy.split(',')
# special case, no_proxy=* means all hosts bypassed
if no_proxy == '*':
bypass_hosts = AllHosts
pi.bypass_hosts = bypass_hosts
return pi
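# Illustrative behaviour (a sketch, assuming these variables in the environment):
#   http_proxy=http://proxy.local:8080  no_proxy=internal.example
#   proxy_info_from_environment('http') -> ProxyInfo for proxy.local:8080 whose
#   bypass_hosts is ['internal.example']; with no_proxy='*' every host is bypassed.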
def proxy_info_from_url(url, method='http'):
"""
Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if '@' in url[1]:
ident, host_port = url[1].split('@', 1)
if ':' in ident:
username, password = ident.split(':', 1)
else:
password = ident
else:
host_port = url[1]
if ':' in host_port:
host, port = host_port.split(':', 1)
else:
host = host_port
if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]
proxy_type = 3 # socks.PROXY_TYPE_HTTP
return ProxyInfo(
proxy_type = proxy_type,
proxy_host = host,
proxy_port = port,
proxy_user = username or None,
proxy_pass = password or None,
)
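# Illustrative parse (a sketch; user, secret and proxy.local are placeholders):
#   proxy_info_from_url("http://user:secret@proxy.local:3128")
#   -> ProxyInfo(proxy_type=3, proxy_host='proxy.local', proxy_port=3128,
#                proxy_user='user', proxy_pass='secret')
# When the port is omitted it defaults to 80 for 'http' and 443 for 'https'.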
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
else:
use_proxy = False
if use_proxy and proxy_rdns:
host = proxy_host
port = proxy_port
else:
host = self.host
port = self.port
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if use_proxy:
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s) ************" % (self.host, self.port)
if use_proxy:
print "proxy: %s ************" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
self.sock.connect((self.host, self.port) + sa[2:])
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
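    # Illustrative check (a sketch): for a cert whose subjectAltName lists the DNS
    # glob '*.example.com', _ValidateCertificateHostname matches 'www.example.com'
    # but rejects 'sub.deep.example.com' and 'example.org', since '*' expands to
    # the single-label pattern '[^.]*'.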
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
if self.proxy_info and self.proxy_info.isgood():
use_proxy = True
proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass = self.proxy_info.astuple()
else:
use_proxy = False
if use_proxy and proxy_rdns:
host = proxy_host
port = proxy_port
else:
host = self.host
port = self.port
address_info = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
for family, socktype, proto, canonname, sockaddr in address_info:
try:
if use_proxy:
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass)
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
                self.sock = _ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
                    hostname = self.host.split(':')[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print "connect fail: (%s, %s)" % (self.host, self.port)
if use_proxy:
print "proxy: %s" % str((proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
SCHEME_TO_CONNECTION = {
'http': HTTPConnectionWithTimeout,
'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
try:
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google.appengine.api.urlfetch import fetch
from google.appengine.api.urlfetch import InvalidURLError
except ImportError:
from google3.apphosting.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google3.apphosting.api.urlfetch import fetch
from google3.apphosting.api.urlfetch import InvalidURLError
def _new_fixed_fetch(validate_certificate):
def fixed_fetch(url, payload=None, method="GET", headers={},
allow_truncated=False, follow_redirects=True,
deadline=5):
            return fetch(url, payload=payload, method=method, headers=headers,
allow_truncated=allow_truncated,
follow_redirects=follow_redirects, deadline=deadline,
validate_certificate=validate_certificate)
return fixed_fetch
class AppEngineHttpConnection(httplib.HTTPConnection):
"""Use httplib on App Engine, but compensate for its weirdness.
The parameters key_file, cert_file, proxy_info, ca_certs, and
disable_ssl_certificate_validation are all dropped on the ground.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPConnection.__init__(self, host, port=port,
strict=strict, timeout=timeout)
class AppEngineHttpsConnection(httplib.HTTPSConnection):
"""Same as AppEngineHttpConnection, but for HTTPS URIs."""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, strict=strict,
timeout=timeout)
self._fetch = _new_fixed_fetch(
not disable_ssl_certificate_validation)
    # Update the connection classes to use the Google App Engine specific ones.
SCHEME_TO_CONNECTION = {
'http': AppEngineHttpConnection,
'https': AppEngineHttpsConnection
}
except ImportError:
pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
- compression,
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None,
proxy_info=proxy_info_from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
          proxy_info_from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
# Keep Authorization: headers on a redirect.
self.forward_authorization_headers = False
def __getstate__(self):
state_dict = copy.copy(self.__dict__)
# In case request is augmented by some foreign object such as
# credentials which handle auth
if 'request' in state_dict:
del state_dict['request']
if 'connections' in state_dict:
del state_dict['connections']
return state_dict
def __setstate__(self, state):
self.__dict__.update(state)
self.connections = {}
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
for i in range(RETRIES):
try:
if hasattr(conn, 'sock') and conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if hasattr(conn, 'sock') and conn.sock is None:
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i < RETRIES-1:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i < RETRIES-1:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
else:
content = ""
if method == "HEAD":
conn.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if 'authorization' in headers and not self.forward_authorization_headers:
del headers['authorization']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin with either
'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE,
etc. There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a
string object.
Any extra headers that are to be sent with the request should be
provided in the 'headers' dictionary.
The maximum number of redirect to follow before raising an
        exception is 'redirections'. The default is 5.
The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if scheme == 'https':
if certs:
conn = self.connections[conn_key] = connection_type(
authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
# There seems to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response({
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response({
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key.lower()] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key.lower()] = value
self.status = int(self.get('status', self.status))
self.reason = self.get('reason', self.reason)
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
| harshilasu/LinkurApp | y/google-cloud-sdk/platform/gsutil/third_party/httplib2/python2/httplib2/__init__.py | Python | gpl-3.0 | 69,586 |
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/test.db' | xperienced/flask-rest-boilerplate | config/development.py | Python | mit | 63 |
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="index.html"), name='index'),
url(r'^admin/', include(admin.site.urls)),
]
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| livingbio/weather-parser | src/weather/urls.py | Python | mit | 494 |
import gmpy2
# import functools
#
# def memoize(obj):
# cache=obj.cache={}
# @functools.wraps(obj)
# def memoizer(*args,**kwargs):
# if args not in cache:
# cache[args]=obj(*args,**kwargs)
# return cache[args]
# return memoizer
class Memoize(object):
def __init__(self,foo):
self.foo = foo
self.memo= {}
def __call__(self, *args):
if args not in self.memo:
self.memo[args] = self.foo(*args)
return self.memo[args]
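# Illustrative use of Memoize (a sketch; 'fib' is a made-up example, not part of
# this module). Works for any function whose positional arguments are hashable:
#   @Memoize
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#   fib(30)   # each distinct n is computed once, later calls hit self.memo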
class SymConf(object):
"""
    represents a unique network configuration.
    * Important: in this representation, states are limited
    to a maximum of 10 states.
"""
dimension=1
base =2
basemax =2
def __init__(self,val=0):
""" initialize the class. val may assume either string or
integer representation. Integer decomposition is performed
by gmpy2.
"""
tmp=val
if isinstance(val,str):
tmp = int(val,SymConf.base)
self.label,self.label_int,self.repetition = SymConf.get_representative(tmp)
def get(self):
""" return the representative configuration label"""
return self.label
def get_state(self,k):
""" return the state at node/position k held by configuration
Ex:
10100 -> [0, 0, 1, 0, 1]. state[2] = 1 and state[4]=1
"""
return self.label[k] # does not check if 0 < k < dimension - 1
def get_dimension(self):
""" returns number of nodes/elements for each representation"""
return SymConf.dimension
def set(self,conf):
self.label = conf
return None
def set_state(self,k,new_state):
""" change state[k] to new_state"""
self.label[k] = new_state
return self.label
def get_integer(self):
""" returns the corresponding integer with base self.base"""
        return int(self.label, SymConf.base)
def get_repetition(self):
""" returns the number of times the configuration repeats itself
after N cyclic permutations"""
return self.repetition
def get_count(self,state):
""" returns the state sum over all nodes: sum(state[k],k=0..N-1)"""
#return sum( int(x) for x in self.label )
return self.label.count(state)
def init_globals(base,N):
""" define global class variables such as base and dimension"""
SymConf.base = base
SymConf.dimension = N
SymConf.basemax = base**N
@Memoize
def get_representative(val,p=0):
""" return the representative configuration of val
with eigennumber p.
This function explores the correspondence between
finite groups {O} and permutation operations.
Given a single state S (for instance '0101'), the
linear combination with eigenvalue p is obtained
by (1/Normalization) sum_k ([p Ô]^k S.
We define the representative configuration as the
state with lowest integer
Ex: (base 2)
state representative (p=0 and N=4)
1000 0001
0100 0001
0101 0101
1010 0101
"""
#first let us consider only p=0 (no null norm)
if isinstance(val,str):
val = int(val,SymConf.base)
rep=val
current = val
repetition=1
for k in range(SymConf.dimension-1):
new = current * SymConf.base
shift,current = divmod(new,SymConf.basemax)
current = current + shift
if not (current > rep):
repetition = repetition + max(0, 1-abs(rep-current))
rep = current
return gmpy2.digits(rep,SymConf.base).zfill(SymConf.dimension),rep,repetition
# def get_all_representatives(p=0):
# lookup_int = {}
# lookup_str = {}
# for k in range(SymConf.basemax):
# x=SymConf.get_representative(k,p)
# lookup_str[k] = x[0]
# lookup_int[k] = x[1]
# reps_str=set(lookup_str.values())
# reps_int=set(lookup_int.values())
# return lookup_str,reps_str,lookup_int,reps_int
def get_basis(p=0):
""" return the vector space for p-sector"""
lookup = []
for k in range(SymConf.basemax):
lookup.append( SymConf.get_representative(k,p)[1])
return [SymConf(x) for x in set(lookup)]
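    # Illustrative result (a sketch): with SymConf.init_globals(2, 4) the p=0 basis
    # holds one representative per cyclic class, i.e. the six configurations
    # '0000', '0001', '0011', '0101', '0111', '1111'.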
def __repr__(self):
return '<SymConf %r>' % (self.label)
##==================================================
if __name__ == '__main__':
N=4
base=2
SymConf.init_globals(base,N)
from timeit import default_timer as timer
t=timer()
for i in range(base**(N)):
#print(SymConf(i))
SymConf(i)
    print('Elapsed time on first pass (cold memo cache) %s' % (timer()-t))
t=timer()
for i in range(base**(N)):
#print(SymConf(i))
SymConf(i)
    print('Elapsed time on second pass (warm memo cache) %s' % (timer()-t))
import sys
print(sys.getsizeof(SymConf))
print(sys.getsizeof(SymConf(0)))
for i in range(base**N):
print(i, SymConf(i).label_int )
a=SymConf.get_basis()
| gmnakamura/epidemic-transition-matrix | python/symmetric_configuration.py | Python | gpl-3.0 | 5,314 |
# Created By: Virgil Dupras
# Created On: 2008-07-21
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from ..testutil import eq_
from ...const import PaneType
from ..base import TestApp, with_app
# --- Pristine
@with_app(TestApp)
def test_set_query(app):
# Setting the 'query' property works"""
app.sfield.text = 'foobar'
eq_(app.sfield.text, 'foobar')
@with_app(TestApp)
def test_set_query_selects_transaction_pane(app):
# Setting the search query selects the transaction tab specifically. Previously, the tab that
# was selected was always the 3rd one, regardless of the type.
app.mw.close_pane(2) # we close the transactions pane
app.sfield.text = 'foo'
app.check_current_pane(PaneType.Transaction)
# --- Two transactions
def app_two_transactions():
app = TestApp()
app.add_account('Desjardins')
app.show_account()
app.add_entry(description='a Deposit', payee='Joe SixPack', checkno='42A', transfer='Income', increase='212.12')
# it's important for the test that this txns has no space in its fields
app.add_entry(description='Withdrawal', payee='Dunno-What-To-Write', checkno='24B', transfer='Cash', decrease='140')
app.show_tview()
return app
@with_app(app_two_transactions)
def test_account(app):
    # when using the 'account:' search form, only accounts are searched. Also, commas can be used
    # to specify more than one term.
app.sfield.text = 'account: withdrawal,inCome'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'a Deposit')
@with_app(app_two_transactions)
def test_query_amount(app):
# Amounts can be queried, and the cents, when 0, can be ommited.
app.sfield.text = '140'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'Withdrawal')
@with_app(app_two_transactions)
def test_query_amount_exact(app):
# Amount searches are exact.
app.sfield.text = '212'
eq_(app.ttable.row_count, 0)
app.sfield.text = '212.12'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'a Deposit')
@with_app(app_two_transactions)
def test_query_amount_negative(app):
# When searching for amount, we ignore the amounts' sign.
app.sfield.text = '-140'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'Withdrawal')
@with_app(app_two_transactions)
def test_query_description(app):
# The query is case insensitive and works on description.
app.sfield.text = 'wiTH'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'Withdrawal')
@with_app(app_two_transactions)
def test_query_checkno(app):
# The query works on checkno.
app.sfield.text = '42a'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'a Deposit')
@with_app(app_two_transactions)
def test_query_checkno_partial(app):
# We don't match transactions that only partially match checkno (it doesn't make much sense).
app.sfield.text = '4'
eq_(app.ttable.row_count, 0)
@with_app(app_two_transactions)
def test_query_from(app):
# The 'from' account can be queried.
app.sfield.text = 'desJardins'
eq_(app.ttable.row_count, 2)
@with_app(app_two_transactions)
def test_query_payee(app):
# The query is case insensitive and works on payee.
app.sfield.text = 'siX'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'a Deposit')
@with_app(app_two_transactions)
def test_query_space(app):
# Querying for a space character doesn't cause a crash. Previously, it did because it was
# parsed as an amount.
app.show_tview()
app.sfield.text = ' ' # no crash
eq_(app.ttable.row_count, 2) # same as no filter
@with_app(app_two_transactions)
def test_query_to(app):
# The 'to' account can be queried.
app.sfield.text = 'inCome'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'a Deposit')
@with_app(app_two_transactions)
def test_dont_parse_amount_with_expression(app):
# Don't parse the amount with the 'with_expression' option. It doesn't make sense.
app.sfield.text = '100+40' # The txn with the '140' amount shouldn't show up.
eq_(app.ttable.row_count, 0)
# ---
def app_ambiguity_in_txn_values():
# Transactions have similar values in different fields
app = TestApp()
app.add_txn(description='foo1', payee='foo2', checkno='foo3', from_='foo4', to='foo5', amount='42')
app.add_txn(description='foo2', payee='foo3', checkno='foo4', from_='foo5', to='foo1', amount='43')
return app
@with_app(app_ambiguity_in_txn_values)
def test_targeted_description_search(app):
app.sfield.text = 'description:foo1'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'foo1')
@with_app(app_ambiguity_in_txn_values)
def test_targeted_payee_search(app):
app.sfield.text = 'payee:foo2'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'foo1')
@with_app(app_ambiguity_in_txn_values)
def test_targeted_checkno_search(app):
app.sfield.text = 'checkno:foo3'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'foo1')
# --- Three txns with zero amount
def app_three_txns_with_zero_amount():
app = TestApp()
app.add_txn(description='foo', amount='212.12')
app.add_txn(description='bar', amount='140')
app.add_txn(description='zero-amount', amount='0')
return app
@with_app(app_three_txns_with_zero_amount)
def test_query_amount_with_zero_amount_txn(app):
    # querying an amount when a zero-amount transaction is present doesn't cause a crash
app.sfield.text = '212.12' # no crash
eq_(app.ttable.row_count, 1)
# --- Split
def app_split():
app = TestApp()
splits = [
('first', 'memo1', '42', ''),
('second', 'Memo2', '', '42'),
('third', '', '12', ''),
]
app.add_txn_with_splits(splits)
return app
@with_app(app_split)
def test_query_memo(app):
# memo fields are part of the search query
app.sfield.text = 'memo2'
eq_(app.ttable.row_count, 1)
@with_app(app_split)
def test_query_split_account(app):
# Any account in a split can match a sfield query.
app.sfield.text = 'third'
eq_(app.ttable.row_count, 1)
# --- Three txns filtered
def app_three_txns_filtered():
app = TestApp()
app.add_txn(description='foo')
app.add_txn(description='bar')
app.add_txn(description='bar')
app.sfield.text = 'bar'
app.clear_gui_calls()
return app
@with_app(app_three_txns_filtered)
def test_change_account(app):
# Changing selection to another account cancels the filter.
app.show_nwview()
eq_(app.sfield.text, '')
    # setting the sfield query didn't make the document go to all_transactions again
eq_(app.mainwindow.current_pane_index, 0)
app.sfield.view.check_gui_calls(['refresh'])
app.show_tview()
eq_(app.ttable.row_count, 3)
@with_app(app_three_txns_filtered)
def test_change_account_to_bsheet(app):
    # The balance sheet is triggered by another notification, so we must also test it in addition to test_change_account.
app.show_nwview()
eq_(app.sfield.text, '')
app.sfield.view.check_gui_calls(['refresh'])
app.show_tview()
eq_(app.ttable.row_count, 3)
@with_app(app_three_txns_filtered)
def test_modify_transaction_out_of_filter(app):
# When changing a txn so it doesn't match the filter anymore, remove it.
row = app.ttable.selected_row
row.description = 'baz'
app.ttable.save_edits()
eq_(app.ttable.row_count, 1)
eq_(app.ttable.selected_indexes, [0])
# --- Grouped and ungrouped txns
def app_grouped_and_ungrouped_txns():
app = TestApp()
app.add_group('MyGroup')
app.add_account('Grouped', group_name='MyGroup')
app.add_account('Ungrouped')
app.add_txn(description='first', from_='Grouped')
app.add_txn(description='second', from_='Ungrouped')
return app
@with_app(app_grouped_and_ungrouped_txns)
def test_query_group(app):
app.sfield.text = 'group:foo,mygRoup'
eq_(app.ttable.row_count, 1)
eq_(app.ttable[0].description, 'first')
| brownnrl/moneyguru | core/tests/gui/search_field_test.py | Python | gpl-3.0 | 8,232 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
import tvm.topi.testing
# "unquantize" a quantized tensor
def recover(data, scale, zp):
return scale * (np.asarray(data) - zp)
def generate_golden_output(x_recovered, y_recovered, scale, zp):
mul = x_recovered * y_recovered
output = np.around(mul / scale + zp)
q_min = np.iinfo(np.uint8).min
q_max = np.iinfo(np.uint8).max
return np.clip(output, q_min, q_max)
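# A minimal sketch (not exercised by the tests below) of the affine quantization
# model these helpers assume: real_value = scale * (q - zero_point). The scale,
# zero point and data values here are hypothetical and chosen only for illustration.
def _demo_quantization_roundtrip():
    scale, zero_point = 0.5, 128
    q_data = np.array([128, 130, 126], dtype=np.uint8)
    real = recover(q_data, scale, zero_point)  # -> [0.0, 1.0, -1.0]
    # Requantizing with the same params recovers the original uint8 values.
    requantized = np.clip(np.around(real / scale + zero_point), 0, 255).astype(np.uint8)
    return real, requantized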
def test_tflite_same_io_qnn_params():
data_dtype = "uint8"
lhs_scale = rhs_scale = output_scale = 0.00784314
lhs_zero_point = rhs_zero_point = output_zero_point = 127
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_datas = [
np.array((1, 153, 2, 178)).reshape((1, 4)),
np.array((25, 1, 178, 216)).reshape((1, 4)),
np.array((25, 153, 1, 165)).reshape((1, 4)),
]
y_datas = [
np.array((204, 178, 1, 8)).reshape((1, 4)),
np.array((204, 178, 191, 1)).reshape((1, 4)),
np.array((204, 178, 1, 191)).reshape((1, 4)),
]
for i in range(0, 3):
x_data = x_datas[i]
y_data = y_datas[i]
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
def test_tflite_different_io_qnn_params():
data_dtype = "uint8"
lhs_scale = 0.0156863
lhs_zero_point = 127
rhs_scale = 0.0117647
rhs_zero_point = 85
output_scale = 0.0235294
output_zero_point = 128
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_datas = [
np.array((76, 140, 153, 172)).reshape((1, 4)),
np.array((133, 140, 146, 153)).reshape((1, 4)),
np.array((76, 140, 172, 146)).reshape((1, 4)),
]
y_datas = [
np.array((136, 119, 128, 17)).reshape((1, 4)),
np.array((136, 119, 111, 94)).reshape((1, 4)),
np.array((136, 119, 17, 128)).reshape((1, 4)),
]
for i in range(0, 3):
x_data = x_datas[i]
y_data = y_datas[i]
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
def test_saturation():
# Same params
data_dtype = "uint8"
lhs_scale = rhs_scale = output_scale = 0.125
lhs_zero_point = rhs_zero_point = output_zero_point = 0
x = relay.var("x", shape=(1, 4), dtype=data_dtype)
y = relay.var("y", shape=(1, 4), dtype=data_dtype)
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 1, 1, 0)).reshape((1, 4))
y_data = np.array((255, 255, 128, 0)).reshape((1, 4))
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
# Same params, different scale
lhs_scale = rhs_scale = 0.125
output_scale = 0.25
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 1, 1, 0)).reshape((1, 4))
y_data = np.array((255, 255, 127, 0)).reshape((1, 4))
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
# All params different
lhs_scale = 0.5
rhs_scale = 0.25
output_scale = 0.125
z = relay.qnn.op.mul(
lhs=x,
rhs=y,
lhs_scale=relay.const(lhs_scale, "float32"),
lhs_zero_point=relay.const(lhs_zero_point, "int32"),
rhs_scale=relay.const(rhs_scale, "float32"),
rhs_zero_point=relay.const(rhs_zero_point, "int32"),
output_scale=relay.const(output_scale, "float32"),
output_zero_point=relay.const(output_zero_point, "int32"),
)
func = relay.Function([x, y], z)
mod = tvm.IRModule.from_expr(func)
mod = relay.transform.InferType()(mod)
mod = relay.qnn.transform.CanonicalizeOps()(mod)
func = mod["main"]
x_data = np.array((255, 0, 1, 0)).reshape((1, 4))
y_data = np.array((0, 128, 64, 0)).reshape((1, 4))
x_rec = recover(x_data, lhs_scale, lhs_zero_point)
y_rec = recover(y_data, rhs_scale, rhs_zero_point)
golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
x_data, y_data
)
np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
if __name__ == "__main__":
test_tflite_same_io_qnn_params()
test_tflite_different_io_qnn_params()
test_saturation()
| dmlc/tvm | tests/python/relay/test_op_qnn_mul.py | Python | apache-2.0 | 8,788 |
import Display
import Helper
from skimage.color import rgb2gray
import numpy as np
from scipy import misc
from sklearn import svm, grid_search
from skimage import img_as_ubyte, io
from sklearn import decomposition
import matplotlib.pyplot as plt
import string
import random
import os, sys
# Load train data
train_filenames = []
for filename in os.listdir("../train/positive"):
if(filename != ".DS_Store"): train_filenames.append("../train/positive/" + filename)
train_targets = [1]*(len(os.listdir("../train/positive"))-1)
for filename in os.listdir("../train/negative"):
if(filename != ".DS_Store"): train_filenames.append("../train/negative/" + filename)
train_targets = train_targets + [0]*(len(os.listdir("../train/negative"))-1)
n_train_samples = len(train_filenames)
sample_size = 20*20
train_data = np.zeros((n_train_samples, sample_size))
i = 0
for filename in train_filenames:
img = io.imread(filename)
train_data[i] = img.flatten()
i = i + 1;
# Load test data
test_filenames = []
for filename in os.listdir("../test"):
if(filename != ".DS_Store"): test_filenames.append("../test/" + filename)
n_test_samples = len(test_filenames)
test_data = np.zeros((n_test_samples, sample_size))
i = 0
for filename in test_filenames:
img = io.imread(filename)
test_data[i] = img.flatten()
i = i + 1;
# Visualise
n_positives = len(os.listdir("../train/positive"))-1
train_data_reduced = decomposition.PCA(n_components=2).fit_transform(train_data)
positives = decomposition.PCA(n_components=2).fit_transform(train_data[:n_positives])
negatives = decomposition.PCA(n_components=2).fit_transform(train_data[n_positives:])
# fig, ax1 = plt.subplots()
# ax1.scatter(positives[:, 0], positives[:, 1], color='b')
# ax2 = ax1.twinx()
# ax2.scatter(negatives[:, 0], negatives[:, 1], color='r')
# plt.show()
# create a mesh to plot in
h = 1000 # step size in the mesh
x_min, x_max = train_data_reduced[:, 0].min() - 1, train_data_reduced[:, 0].max() + 1
y_min, y_max = train_data_reduced[:, 1].min() - 1, train_data_reduced[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
fig, ax1 = plt.subplots()
classifier = svm.SVC(kernel='rbf', gamma=0.7, C=0.5)
#classifier = svm.SVC(kernel='rbf', gamma=0.7, C=C)
classifier.fit(train_data_reduced, train_targets)
Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
print(Z)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
X = train_data_reduced
y = train_targets
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('PCA component 1')
plt.ylabel('PCA component 2')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| oduwa/Pic-Numero | PicNumero/svm_visualise.py | Python | mit | 2,779 |
# Licensed under the Apache License Version 2.0: http://www.apache.org/licenses/LICENSE-2.0.txt
__author__ = 'Giles Richard Greenway'
import base
from db import select,get_weekday
from ckan.lib.celery_app import celery
from celery.task.sets import subtask
from celery import group
from itertools import chain,groupby
import dateutil.parser
from datetime import datetime
import requests
import json
from st_cluster import cluster
from playscrape import get_app_details
def task_imports():
return ['ckanext.mobileminer.tasks']
resources = base.get_resources()
local = base.get_local()
def get_users():
return [ r['uid'] for r in local.action.datastore_search(resource_id=resources.get('user'))['records'] ]
def get_user_apps(uid):
return [ r['process'] for r in local.action.datastore_search(resource_id=resources.get('userapps'), filters={'uid':uid}, sort='process')['records'] ]
def get_cell(mcc,mnc,lac,cellid):
config = base.get_config()
open_cell_key = config.get('settings', 'open_cell_key')
open_cell_url = config.get('settings', 'open_cell_url')
print ','.join([mcc,mnc,lac,cellid])
payload = {'key':open_cell_key,'mcc':mcc,'mnc':mnc,'lac':lac,'cellid':cellid,'format':'json'}
res = requests.get(open_cell_url, params=payload)
try:
print res.text
raw = json.loads(res.text)
return {'mcc':mcc, 'mnc':mnc, 'lac':lac, 'cid':raw['cellid'],
'lat':raw['lat'], 'lon':raw['lon'], 'changeable':raw['changeable'], 'reterieved':datetime.now().isoformat()}
except:
return False
# http://stackoverflow.com/questions/13271056/how-to-chain-a-celery-task-that-returns-a-list-into-a-group
@celery.task(name = "NAME.dmap")
def dmap(it, callback):
# Map a callback over an iterator and return as a group
callback = subtask(callback)
return group(callback.clone([arg,]) for arg in it)()
def for_all_users(job):
(user_list.s() | dmap.s(job.s())).delay()
@celery.task(name = "NAME.userappsupdate")
def build_user_apps():
for uid in get_users():
#print uid
for table,field in [('socket','process'),('networktraffic','process'),('notification','package')]:
ex_query = field+' NOT IN (' + select(['process'],'userapps',eq={'uid':uid}) + ')'
sql_query = select([field],table,eq={'uid':uid},where=[ex_query])
#print sql_query
records = [ {'uid':uid,'process':r[field]} for r in local.action.datastore_search_sql(sql=sql_query)['records'] ]
local.action.datastore_upsert(resource_id=resources['userapps'],records=records,method='insert')
@celery.task(name = "NAME.dailyappusageupdate")
def daily_usage_update():
mget = lambda t,d,v: d.get(t,{}).get(v,0)
def builder(table,time_field,proc_field='process',conditions={},total=False):
try:
max_date = local.action.datastore_search_sql(sql=select(['MAX(date)'],'dailyappusage',eq={'uid':uid,'process':app}))['records'][0]['max']
except:
max_date = None
if max_date:
            gt = {time_field: max_date}
else:
gt = {}
date_cond = "date_trunc('day',"+time_field+")"
selecter = ["COUNT(*)",date_cond]
if total:
selecter.append('SUM('+total+')')
fields = ['count','sum']
else:
fields = ['count']
cond = dict(conditions.items() + [('uid',uid),(proc_field,app)])
sql_query = select(selecter,table,eq=cond, gt=gt,
having='COUNT(*) > 0',group=date_cond,distinct=False)
return dict([ (rec['date_trunc'],dict([ (field,rec[field]) for field in fields ])) for rec in local.action.datastore_search_sql(sql=sql_query)['records']
if rec.get('date_trunc',False) ])
for uid in get_users():
print uid
for app in get_user_apps(uid):
print app
sockets = builder('socket','opened')
notifications = builder('notification','time',proc_field='package')
rx_traffic = builder('networktraffic','start',conditions={'tx':0},total='bytes')
tx_traffic = builder('networktraffic','start',conditions={'tx':1},total='bytes')
all_the_dates = set(chain.from_iterable([ i.keys() for i in [sockets,notifications,rx_traffic,tx_traffic] ]))
data = [ {'uid':uid, 'process':app, 'sockets':mget(date,sockets,'count'), 'notifications':mget(date,notifications,'count'),
'traffic_in':mget(date,rx_traffic,'count'), 'traffic_out':mget(date,tx_traffic,'count'),
'data_in':int(mget(date,rx_traffic,'sum'))/1024, 'data_out':int(mget(date,tx_traffic,'sum'))/1024,
'date':date.split('T')[0], 'day':base.weekdays[dateutil.parser.parse(date).isoweekday()] } for date in all_the_dates ]
local.action.datastore_upsert(resource_id=resources['dailyappusage'],records=data,method='insert')
#@celery.task(name = "NAME.minerusageupdate")
#def miner_usage_update():
# day_length = 24.0 * 3600.0
# def get_logs(uid,gt):
# searching = True
# page = 0
# while searching:
# print select(["date_trunc('day',start)",'start','stop'],'minerlog',eq={'uid':uid},gt=gt,order='start',page=page)
# logs = local.action.datastore_search_sql(sql=select(["date_trunc('day',start)",'start','stop'],'minerlog',eq={'uid':uid},gt=gt,order='start',page=page))['records']
# if len(logs) == 0:
# searching = False
# else:
# for log in logs:
# yield log
# page += 1
# for uid in get_users():
# data = []
# max_date = local.action.datastore_search_sql(sql=select(['MAX(date)'],'dailyminerusage',eq={'uid':uid}))['records'][0]['max']
# if max_date:
# gt = {'start':'to_date('+max_date+')'}
# else:
# gt = {}
# for date, days in groupby(get_logs(uid,gt),lambda k: k['date_trunc']):
# week_day = base.weekdays[dateutil.parser.parse(date).isoweekday()]
# total = sum([ (dateutil.parser.parse(rec['stop'])-dateutil.parser.parse(rec['start'])).total_seconds() for rec in days ])
# data.append({'uid':uid, 'percentage':int(100*total/day_length), 'date':date, 'day':week_day})
# local.action.datastore_upsert(resource_id=resources['dailyminerusage'],records=data,method='insert')
@celery.task(name = "NAME.gsmupdate")
def gsm_update():
cell_fields = ['mcc','mnc','lac','cid']
intervals = lambda x,y: zip(range(0,x,y),range(0,x+y,y)[1:])
all_the_mcc = [ record['mcc'] for record in local.action.datastore_search_sql(sql=select(['mcc'],'gsmcell',ne={'mcc':'None'}))['records'] ]
for mcc in all_the_mcc:
all_the_mnc = [ record['mnc'] for record in local.action.datastore_search_sql(sql=select(['mnc'],'gsmcell',eq={'mcc':mcc}))['records'] ]
for mnc in all_the_mnc:
#print mcc,mnc
lac_search = True
lac_page = 0
while lac_search:
all_the_lacs = [ record['lac'] for record in local.action.datastore_search_sql(sql=select(['lac'],'gsmcell',eq={'mcc':mcc,'mnc':mnc},page=lac_page))['records'] ]
if len(all_the_lacs) == 0:
lac_search = False
lac_page += 1
for lac in all_the_lacs:
eq = {'mcc':mcc,'mnc':mnc,'lac':lac}
searching = True
page = 0
while searching:
ex_query = 'cid NOT IN (' + select(['cid::text'],'gsmlocation',eq=eq) + ')'
sql_query = select(['cid'],'gsmcell',eq=eq,where=[ex_query],page=page)
cells = [ r['cid'] for r in local.action.datastore_search_sql(sql=sql_query)['records'] ]
if len(cells) == 0:
searching = False
else:
page += 1
rendered_cells = [c for c in [ get_cell(mcc,mnc,lac,i) for i in cells ] if c ]
local.action.datastore_upsert(resource_id=resources['gsmlocation'],records=rendered_cells,method='insert')
return False
@celery.task(name = "NAME.usercells")
def user_cells():
searching = True
page = 0
while searching:
sql_query = select(['mcc','mnc','lac','cid','lat','lon','_id'],'gsmlocation',page=page)
cells = local.action.datastore_search_sql(sql=sql_query)['records']
if len(cells) == 0:
searching = False
page += 1
for cell in cells:
mcc,mnc,lac,cid = [ str(cell[key]) for key in ['mcc','mnc','lac','cid'] ]
ref,lat,lon = [ cell[key] for key in ['_id','lat','lon'] ]
user_search = True
user_page = 0
print ','.join([mcc,mnc,lac,cid])
while user_search:
sql_query = select(['COUNT(*)','uid'],'gsmcell',eq={'mcc':mcc,'mnc':mnc,'lac':lac,'cid':cid},group='uid',page=user_page)
users = local.action.datastore_search_sql(sql=sql_query)['records']
if len(users) == 0:
user_search = False
user_page += 1
for uid in [ u['uid'] for u in users ]:
print uid
local.action.datastore_delete(resource_id = resources['userlocations'],filters={'uid':uid,'cid':ref})
local.action.datastore_upsert(resource_id = resources['userlocations'],
records = [ {'uid':user['uid'], 'count':user['count'], 'cid':ref, 'lat':lat, 'lon':lon} for user in users ],
method = 'insert')
@celery.task(name = "NAME.dailygsmupdate")
def daily_gsm_update():
for uid in get_users():
max_date = local.action.datastore_search_sql(sql=select(['MAX(date)'],'dailygsmcells',eq={'uid':uid}))['records'][0]['max']
if max_date:
gt = {'date':'to_date('+max_date+')'}
else:
gt = {}
searching = True
page = 0
while searching:
sql_query = select(['COUNT(*)',"date_trunc('day',time)"],'gsmcell',eq={'uid':uid},gt=gt,group="date_trunc('day',time)",page=page)
days = local.action.datastore_search_sql(sql=sql_query)['records']
if len(days) == 0:
searching = False
else:
page += 1
data = [ {'uid':uid, 'count':day['count'], 'date':day['date_trunc'], 'day':get_weekday(day['date_trunc']) } for day in days ]
local.action.datastore_upsert(resource_id=resources['dailygsmcells'],records=data,method='insert')
@celery.task(name = "NAME.cellclusters")
def cell_clusters():
def cell_getter(user):
eq = {'uid':user}
page = 0
searching = True
while searching:
query = 'SELECT t2.lat, t2.lon, t1.time FROM "' + resources['gsmcell'] + '" AS t1 JOIN "' + resources['gsmlocation'] + '" AS t2 '
query += 'ON t1.mcc = t2.mcc::text AND t1.mnc = t2.mnc::text AND t1.lac = t2.lac::text AND t1.cid = t2.cid::text WHERE t1.uid = ' + user
query += ' ORDER BY t1.time LIMIT 256 OFFSET '+ str(page*256)
cells = local.action.datastore_search_sql(sql=query)['records']
print str(user)+' '+str(len(cells))
#print str(len(cells))+' '+str(user)
if len(cells) == 0:
searching = False
else:
page += 1
            last_time = ''
            for cell in cells:
                if cell['time'] != last_time:
yield (float(cell['lat']),float(cell['lon']),dateutil.parser.parse(cell['time']))
last_time = cell['time']
cluster_resource = resources['gsmclusters']
sequence_resource = resources['clustersequence']
for uid in get_users():
clusters, sequence = cluster(cell_getter(uid))
print "Found " + str(len(clusters)) + " clusters for user " + str(uid)
if len(clusters) > 0:
map(lambda c: c.update({'uid':uid}),clusters)
local.action.datastore_delete(resource_id=cluster_resource,filters={'uid':uid})
local.action.datastore_upsert(resource_id=cluster_resource,records=clusters,method='insert')
map(lambda c: c.update({'uid':uid}),sequence)
local.action.datastore_delete(resource_id=sequence_resource,filters={'uid':uid})
local.action.datastore_upsert(resource_id=sequence_resource,records=sequence,method='insert')
@celery.task(name = "NAME.appdetails")
def app_details():
app_resource = resources['appinfo']
page = 0
searching = True
while searching:
sql_query = select(['process'],'userapps',page=page)
apps = [ rec['process'] for rec in local.action.datastore_search_sql(sql=sql_query)['records'] ]
if len(apps) == 0:
searching = False
else:
page += 1
for app in apps:
if app == 'uk.ac.kcl.odo.mobileminer':
continue
print app
package = app.split(':')[0]
if len(local.action.datastore_search_sql(sql=select(['package'],'appinfo',eq={'package':package}))['records']) == 0:
print package
this_package = get_app_details(package)
if this_package:
try:
local.action.datastore_upsert(resource_id=app_resource,records=[this_package],method='insert')
except:
print this_package
| kingsBSD/MobileMinerPlugin | ckanext-mobileminer/ckanext/mobileminer/tasks.py | Python | apache-2.0 | 13,862 |
import os
import numpy as np
from scipy import fftpack
import scipy.special as special
def generate_coefficents(data, N_coef=20):
"""
take the fft of some time series data and get the meaningful part of it
@param data, the input time series data
    @param N_coef, the number of coefficients to return:
    the minimum of the number of coefficients generated by the fft and N_coef
"""
data_real = np.real(data) # get the real values
F = fftpack.fft(data_real)/len(data_real)
F[1:] = F[1:]*2.0
return F[:min(F.shape[0],N_coef)]
# data from http://www.mie.utoronto.ca/labs/bsl/data.html
def get_coefficients():
"""
    The fft of this data used exp(+1.0j) instead of exp(-1.0j), so we need the conjugate.
"""
coef = [(1.0, 0), (0.179325, 0.498146), (-0.385872, 0.511875), (-0.450811, -0.329866),
(-0.106449, -0.263848), (0.295767, -0.317235), (0.194631, 0.177449),
(-0.0336471, 0.0964081), (-0.0444237, 0.0253163), (-0.0253872, -0.0337126),
(0.0242532, -0.0221089), (0.0197074, 0.00370957), (0.00928437, 0.0083496),
(0.00165714, 0.0120435)]
coef = np.array([complex(c[0], -c[1]) for c in coef])
return coef
def reconstruct(coef, T=1.0, t_pts=200):
""" Calculate flow rate with sines and cosines
"""
t = np.linspace(0,T,t_pts)
Q = np.zeros(t.shape)
omega = 2.0*np.pi/T
for idx, c in enumerate(coef):
Q += c.real*np.cos(omega*idx*t) - c.imag*np.sin(omega*idx*t)
return Q, t
def reconstruct_pt(coef, t, T=1.0):
""" reconstruct the value at one point
using sines and cosines
"""
Q = 0.0
omega = 2.0*np.pi/T
for idx, c in enumerate(coef):
Q += c.real*np.cos(omega*idx*t) - c.imag*np.sin(omega*idx*t)
return Q
def reconstruct2(coef, T=1.0, t_pts=200):
"""Calculate flow rate with exponentials
    if the fft was exp(-1.0j) then the reconstruction is exp(1.0j)
"""
t = np.linspace(0,T,t_pts)
Q = np.zeros(t.size, dtype=np.complex)
omega = 2.0*np.pi/T
for idx, c in enumerate(coef):
#print(c[0]*np.exp(1.0j*np.pi*idx*t/T))
Q += (c * np.exp(1.0j*omega*idx*t))
return Q.real, t
def reconstruct_pt2(coef, t, T=1.0):
""" reconstruct the value at one point
using exponentials
"""
Q = 0.0j
omega = 2.0*np.pi/T
for idx, c in enumerate(coef):
#print(c[0]*np.exp(1.0j*np.pi*idx*t/T))
Q += (c * np.exp(1.0j*omega*idx*t))
return Q.real
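# A minimal self-check sketch (assumed usage, not called anywhere in this module):
# sample one period of a simple signal, take its Fourier coefficients, and confirm
# that the trigonometric and exponential reconstructions agree at a chosen time point.
def _demo_fourier_roundtrip(n_samples=256):
    t = np.linspace(0.0, 1.0, n_samples, endpoint=False)
    signal = 1.0 + 0.5 * np.cos(2.0 * np.pi * t) + 0.25 * np.sin(4.0 * np.pi * t)
    coef = generate_coefficents(signal, N_coef=5)
    # At t = 0.25 the signal equals 1.0; both reconstructions should return ~1.0.
    q_trig = reconstruct_pt(coef, 0.25, T=1.0)
    q_exp = reconstruct_pt2(coef, 0.25, T=1.0)
    return coef, q_trig, q_exp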
def womersley_number(rho, omega, mu, R, n=1.0):
"""Returns the womersley number alpha.
@param R: pipe radius
@param omega: oscillation frequency
@param mu: viscosity
@param rho: fluid density
"""
return np.sqrt(n*rho*omega/mu)*R
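# Worked example (assumed, roughly blood-like CGS values, not taken from the data
# above): rho = 1.06 g/cm^3, omega = 2*pi rad/s, mu = 0.04 poise and R = 0.15 cm give
# alpha = sqrt(1.06 * 2*pi / 0.04) * 0.15 ~= 1.9, i.e. a viscosity-dominated regime.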
def womersley_velocity(coef, rho, omega, mu, R, r, t):
"""Returns the analytical velocity profile from fourier coefficients
@param coef: fft velocity coefficients
@param rho: density
    @param omega: angular frequency, 2*pi/T
@param mu: viscosity
@param R: pipe radius
@param r: radius coordinate
@param t: time coordinate
"""
term_1 = coef[0] * (1.0-(r/R)**2) # steady flow portion
term_2 = complex(0.0, 0.0j) # unsteady portion defined below
if r < 0.0:
r = abs(r)
for n, c in enumerate(coef):
if n == 0:
continue
alpha_n = womersley_number(rho, omega, mu, R, n)
lambda_ = np.sqrt(1.0j**3) * alpha_n
exp_term = np.exp(1.0j * n * omega * t)
J_num = special.jn(0, lambda_ * r / R) / special.jn(0, lambda_)
term_2 += c * (1.0 - J_num) * exp_term
return term_2 + term_1
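# A minimal usage sketch (illustrative only; the fluid and pipe parameters are the
# same assumed blood-like CGS values as in the worked example above, not values
# derived from any data in this module): evaluate the velocity profile across the
# pipe at t = 0 using the digitized flow coefficients.
def _demo_womersley_profile():
    coef = get_coefficients()
    rho, mu = 1.06, 0.04        # g/cm^3, poise (assumed)
    T = 1.0                     # s, assumed period
    omega = 2.0 * np.pi / T
    R = 0.15                    # cm, assumed pipe radius
    radii = np.linspace(-R, R, 9)
    profile = [womersley_velocity(coef, rho, omega, mu, R, r, 0.0) for r in radii]
    # The physical velocity is the real part of the complex Womersley solution.
    return radii, np.real(profile)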
def womersley_parts(coef, rho, omega, mu, R, r, t):
"""Returns the analytical velocity profile from fourier coefficients
@param coef: fft velocity coefficients
@param rho: density
    @param omega: angular frequency, 2*pi/T
@param mu: viscosity
@param R: pipe radius
@param r: radius coordinate
@param t: time coordinate
"""
term_1 = coef[0] * (1.0-(r/R)**2) # steady flow portion
term_2 = complex(0.0, 0.0j) # unsteady portion defined below
if r < 0.0:
r = abs(r)
for n, c in enumerate(coef):
if n == 0:
continue
alpha_n = womersley_number(rho, omega, mu, R, n)
lambda_ = np.sqrt(1.0j**3) * alpha_n
exp_term = np.exp(1.0j * n * omega * t)
J_num = special.jn(0, lambda_ * r / R) / special.jn(0, lambda_)
term_2 += c * (1.0 - J_num) * exp_term
#print(special.jn(1, lambda_))
#print(J_den)
return term_1, term_2
# this one is the same as mean_2_quasi_shear
def scale_by_shear(coef, rho, omega, mu, R1, R2):
"""Returns the scaled shear stress coefficients
from peak fourier coefficients
@param coef: fft pressure coefficients
@param rho: density
@param omega: frequency
@param mu: viscosity
@param R1: pipe radius R1
@param R2: pipe radius R2
"""
c_new = np.zeros(coef.shape, dtype=np.complex)
for n, c in enumerate(coef):
if (n==0):
c_new[0] = c*R2/R1
else:
# alpha_n_1 alpha_n_2
lambda_1 = np.sqrt(1.0j**3) * womersley_number(rho, omega, mu, R1, n)
lambda_2 = np.sqrt(1.0j**3) * womersley_number(rho, omega, mu, R2, n)
J1_1 = special.jn(1, lambda_1)
J0_1 = special.jn(0, lambda_1)
J1_2 = special.jn(1, lambda_2)
J0_2 = special.jn(0, lambda_2)
#print(lambda_1, lambda_2, J1_1, J0_1, J1_2, J0_2 )
c_new[n] = c * R2/R1 * ( 1 - J0_2 / J1_2) / ( 1 - J0_1 / J1_1)
return c_new
def scale_by_flow(coef, rho, omega, mu, R1, R2):
"""Returns the scaled mean velocity coefficients
from mean fourier coefficients at different radius
@param coef: fft pressure coefficients
@param rho: density
@param omega: frequency
@param mu: viscosity
@param R1: pipe radius R1
@param R2: pipe radius R2
"""
c_new = np.zeros(coef.shape, dtype=np.complex)
for n, c in enumerate(coef):
if (n==0):
c_new[0] = c * R1**2/R2**2
else:
# alpha_n_1 alpha_n_2
lambda_1 = np.sqrt(1.0j**3) * womersley_number(rho, omega, mu, R1, n)
lambda_2 = np.sqrt(1.0j**3) * womersley_number(rho, omega, mu, R2, n)
J1_1 = special.jn(1, lambda_1)
J0_1 = special.jn(0, lambda_1)
J1_2 = special.jn(1, lambda_2)
J0_2 = special.jn(0, lambda_2)
#print(lambda_1, lambda_2, J1_1, J0_1, J1_2, J0_2 )
c_new[n] = c * R1**2/R2**2 * ( 1 - 2.0/lambda_2 * J1_2 / J0_2) / ( 1 - 2.0/lambda_1 * J1_1 / J0_1)
return c_new
def to_grayscale(im, weights = np.c_[0.2989, 0.5870, 0.1140]):
"""
Transforms a colour image to a greyscale image by
taking the mean of the RGB values, weighted
by the matrix weights
"""
tile = np.tile(weights, reps=(im.shape[0],im.shape[1],1))
return np.sum(tile * im, axis=2)
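# A minimal usage sketch (synthetic input, illustrative only): converting a random
# H x W x 3 RGB array yields an H x W greyscale array.
def _demo_to_grayscale():
    rgb = np.random.rand(4, 4, 3)
    grey = to_grayscale(rgb)
    assert grey.shape == (4, 4)
    return grey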
| kayarre/Tools | womersley/womersley/utils.py | Python | bsd-2-clause | 7,020 |
# XXX It would be useful to add most of EasyDialogs here, and call
# this module EasyCocoaDialogs.py or something.
__all__ = ["AskString"]
from PyObjCTools import NibClassBuilder, AppHelper
from AppKit import NSApp
NibClassBuilder.extractClasses("AskString")
# class defined in AskString.nib
class AskStringWindowController(NibClassBuilder.AutoBaseClass):
# the actual base class is NSWindowController
# The following outlets are added to the class:
# questionLabel
# textField
def __new__(cls, question, resultCallback, default="", parentWindow=None):
self = cls.alloc().initWithWindowNibName_("AskString")
self.question = question
self.resultCallback = resultCallback
self.default = default
self.parentWindow = parentWindow
if self.parentWindow is None:
self.window().setFrameUsingName_("AskStringPanel")
self.setWindowFrameAutosaveName_("AskStringPanel")
self.showWindow_(self)
else:
NSApp().beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window(), self.parentWindow, None, None, 0)
self.retain()
return self
def windowWillClose_(self, notification):
self.autorelease()
def awakeFromNib(self):
self.questionLabel.setStringValue_(self.question)
self.textField.setStringValue_(self.default)
def done(self):
if self.parentWindow is None:
self.close()
else:
sheet = self.window()
NSApp().endSheet_(sheet)
sheet.orderOut_(self)
def ok_(self, sender):
value = self.textField.stringValue()
self.done()
self.resultCallback(value)
def cancel_(self, sender):
self.done()
self.resultCallback(None)
def AskString(question, resultCallback, default="", parentWindow=None):
AskStringWindowController(question, resultCallback, default, parentWindow)
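def _example_ask_name(parentWindow=None):
    # A minimal usage sketch (assumed caller code, not part of the public API):
    # AskString is asynchronous, so the result arrives through the callback rather
    # than a return value, and this helper must run inside a Cocoa event loop
    # (e.g. under AppHelper.runEventLoop()).
    def gotName(value):
        if value is None:
            return  # user cancelled
        print("User typed: %s" % value)
    AskString("What is your name?", gotName, default="nobody", parentWindow=parentWindow)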
| gt-ros-pkg/rcommander-core | nodebox_qt/src/nodebox/gui/mac/AskString.py | Python | bsd-3-clause | 1,988 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-01-12 09:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('seshdash', '0004_auto_20170111_1208'),
]
operations = [
migrations.RemoveField(
model_name='report_sent',
name='report_date',
),
]
| GreatLakesEnergy/sesh-dash-beta | seshdash/migrations/0005_remove_report_sent_report_date.py | Python | mit | 403 |
"""
Unit tests for SafeSessionMiddleware
"""
from unittest.mock import call, patch, MagicMock
import ddt
from crum import set_current_request
from django.conf import settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import AnonymousUser
from django.http import HttpResponse, HttpResponseRedirect, SimpleCookie
from django.test import TestCase
from django.test.utils import override_settings
from openedx.core.djangolib.testing.utils import get_mock_request, CacheIsolationTestCase
from common.djangoapps.student.tests.factories import UserFactory
from ..middleware import (
SafeCookieData,
SafeSessionMiddleware,
mark_user_change_as_expected,
obscure_token,
track_request_user_changes
)
from .test_utils import TestSafeSessionsLogMixin
class TestSafeSessionProcessRequest(TestSafeSessionsLogMixin, TestCase):
"""
Test class for SafeSessionMiddleware.process_request
"""
def setUp(self):
super().setUp()
self.user = UserFactory.create()
self.addCleanup(set_current_request, None)
self.request = get_mock_request()
def assert_response(self, safe_cookie_data=None, success=True):
"""
Calls SafeSessionMiddleware.process_request and verifies
the response.
Arguments:
safe_cookie_data - If provided, it is serialized and
stored in the request's cookies.
success - If True, verifies a successful response.
Else, verifies a failed response with an HTTP redirect.
"""
if safe_cookie_data:
self.request.COOKIES[settings.SESSION_COOKIE_NAME] = str(safe_cookie_data)
response = SafeSessionMiddleware().process_request(self.request)
if success:
assert response is None
assert getattr(self.request, 'need_to_delete_cookie', None) is None
else:
assert response.status_code == HttpResponseRedirect.status_code
assert self.request.need_to_delete_cookie
def assert_no_session(self):
"""
Asserts that a session object is *not* set on the request.
"""
assert getattr(self.request, 'session', None) is None
def assert_no_user_in_session(self):
"""
Asserts that a user object is *not* set on the request's session.
"""
assert self.request.session.get(SESSION_KEY) is None
def assert_user_in_session(self):
"""
Asserts that a user object *is* set on the request's session.
"""
assert SafeSessionMiddleware.get_user_id_from_session(self.request) == self.user.id
@patch("openedx.core.djangoapps.safe_sessions.middleware.LOG_REQUEST_USER_CHANGES", False)
@patch("openedx.core.djangoapps.safe_sessions.middleware.track_request_user_changes")
def test_success(self, mock_log_request_user_changes):
self.client.login(username=self.user.username, password='test')
session_id = self.client.session.session_key
safe_cookie_data = SafeCookieData.create(session_id, self.user.id)
# pre-verify steps 3, 4, 5
assert getattr(self.request, 'session', None) is None
assert getattr(self.request, 'safe_cookie_verified_user_id', None) is None
# verify step 1: safe cookie data is parsed
self.assert_response(safe_cookie_data)
self.assert_user_in_session()
# verify step 2: cookie value is replaced with parsed session_id
assert self.request.COOKIES[settings.SESSION_COOKIE_NAME] == session_id
# verify step 3: session set in request
assert self.request.session is not None
# verify steps 4, 5: user_id stored for later verification
assert self.request.safe_cookie_verified_user_id == self.user.id
# verify extra request_user_logging not called.
assert not mock_log_request_user_changes.called
@patch("openedx.core.djangoapps.safe_sessions.middleware.LOG_REQUEST_USER_CHANGES", True)
@patch("openedx.core.djangoapps.safe_sessions.middleware.track_request_user_changes")
def test_log_request_user_on(self, mock_log_request_user_changes):
self.client.login(username=self.user.username, password='test')
session_id = self.client.session.session_key
safe_cookie_data = SafeCookieData.create(session_id, self.user.id)
self.assert_response(safe_cookie_data)
assert mock_log_request_user_changes.called
def test_success_no_cookies(self):
self.assert_response()
self.assert_no_user_in_session()
def test_success_no_session(self):
safe_cookie_data = SafeCookieData.create('no_such_session_id', self.user.id)
self.assert_response(safe_cookie_data)
self.assert_no_user_in_session()
def test_success_no_session_and_user(self):
safe_cookie_data = SafeCookieData.create('no_such_session_id', 'no_such_user')
self.assert_response(safe_cookie_data)
self.assert_no_user_in_session()
def test_parse_error_at_step_1(self):
self.request.META['HTTP_ACCEPT'] = 'text/html'
with self.assert_parse_error():
self.assert_response('not-a-safe-cookie', success=False)
self.assert_no_session()
def test_invalid_user_at_step_4(self):
self.client.login(username=self.user.username, password='test')
safe_cookie_data = SafeCookieData.create(self.client.session.session_key, 'no_such_user')
self.request.META['HTTP_ACCEPT'] = 'text/html'
with self.assert_incorrect_user_logged():
self.assert_response(safe_cookie_data, success=False)
self.assert_user_in_session()
@ddt.ddt
class TestSafeSessionProcessResponse(TestSafeSessionsLogMixin, TestCase):
"""
Test class for SafeSessionMiddleware.process_response
"""
def setUp(self):
super().setUp()
self.user = UserFactory.create()
self.addCleanup(set_current_request, None)
self.request = get_mock_request()
self.request.session = {}
self.client.response = HttpResponse()
self.client.response.cookies = SimpleCookie()
def assert_response(self, set_request_user=False, set_session_cookie=False):
"""
Calls SafeSessionMiddleware.process_response and verifies
the response.
Arguments:
set_request_user - If True, the user is set on the request
object.
set_session_cookie - If True, a session_id is set in the
session cookie in the response.
"""
if set_request_user:
self.request.user = self.user
SafeSessionMiddleware.set_user_id_in_session(self.request, self.user)
if set_session_cookie:
self.client.response.cookies[settings.SESSION_COOKIE_NAME] = "some_session_id"
response = SafeSessionMiddleware().process_response(self.request, self.client.response)
assert response.status_code == 200
def assert_response_with_delete_cookie(
self,
expect_delete_called=True,
set_request_user=False,
set_session_cookie=False,
):
"""
Calls SafeSessionMiddleware.process_response and verifies
the response, while expecting the cookie to be deleted if
expect_delete_called is True.
See assert_response for information on the other
parameters.
"""
with patch('django.http.HttpResponse.delete_cookie') as mock_delete_cookie:
self.assert_response(set_request_user=set_request_user, set_session_cookie=set_session_cookie)
assert {'sessionid', 'edx-jwt-cookie-header-payload'} \
<= {call.args[0] for call in mock_delete_cookie.call_args_list}
def test_success(self):
with self.assert_not_logged():
self.assert_response(set_request_user=True, set_session_cookie=True)
# error case is tested in TestSafeSessionMiddleware since it requires more setup
def test_confirm_user_at_step_2(self):
self.request.safe_cookie_verified_user_id = self.user.id
with self.assert_not_logged():
self.assert_response(set_request_user=True, set_session_cookie=True)
def test_anonymous_user(self):
self.request.safe_cookie_verified_user_id = self.user.id
self.request.safe_cookie_verified_session_id = '1'
self.request.user = AnonymousUser()
self.request.session[SESSION_KEY] = self.user.id
with self.assert_no_warning_logged():
self.assert_response(set_request_user=False, set_session_cookie=True)
def test_update_cookie_data_at_step_3(self):
self.assert_response(set_request_user=True, set_session_cookie=True)
serialized_cookie_data = self.client.response.cookies[settings.SESSION_COOKIE_NAME].value
safe_cookie_data = SafeCookieData.parse(serialized_cookie_data)
assert safe_cookie_data.version == SafeCookieData.CURRENT_VERSION
assert safe_cookie_data.session_id == 'some_session_id'
assert safe_cookie_data.verify(self.user.id)
def test_cant_update_cookie_at_step_3_error(self):
self.client.response.cookies[settings.SESSION_COOKIE_NAME] = None
with self.assert_invalid_session_id():
self.assert_response_with_delete_cookie(set_request_user=True)
@ddt.data(True, False)
def test_deletion_of_cookies_at_step_4(self, set_request_user):
self.request.need_to_delete_cookie = True
self.assert_response_with_delete_cookie(set_session_cookie=True, set_request_user=set_request_user)
def test_deletion_of_no_cookies_at_step_4(self):
self.request.need_to_delete_cookie = True
# delete_cookies is called even if there are no cookies set
self.assert_response_with_delete_cookie()
@ddt.ddt
class TestSafeSessionMiddleware(TestSafeSessionsLogMixin, CacheIsolationTestCase):
"""
Test class for SafeSessionMiddleware, testing both
process_request and process_response.
"""
def setUp(self):
super().setUp()
self.user = UserFactory.create()
self.addCleanup(set_current_request, None)
self.request = get_mock_request()
self.client.response = HttpResponse()
self.client.response.cookies = SimpleCookie()
def cookies_from_request_to_response(self):
"""
Transfers the cookies from the request object to the response
object.
"""
if self.request.COOKIES.get(settings.SESSION_COOKIE_NAME):
self.client.response.cookies[settings.SESSION_COOKIE_NAME] = self.request.COOKIES[
settings.SESSION_COOKIE_NAME
]
def set_up_for_success(self):
"""
Set up request for success path -- everything up until process_response().
"""
self.client.login(username=self.user.username, password='test')
session_id = self.client.session.session_key
safe_cookie_data = SafeCookieData.create(session_id, self.user.id)
self.request.COOKIES[settings.SESSION_COOKIE_NAME] = str(safe_cookie_data)
with self.assert_not_logged():
response = SafeSessionMiddleware().process_request(self.request)
# Note: setting the user here is later than it really happens, but it enables a
# semi-accurate user change tracking. The only issue is that it changes from
# None to user, rather than being logged as the first time request.user is set,
# as actually happens in Production.
self.request.user = self.user
assert response is None
assert self.request.safe_cookie_verified_user_id == self.user.id
self.cookies_from_request_to_response()
def verify_success(self):
"""
Verifies success path.
"""
self.set_up_for_success()
with self.assert_not_logged():
response = SafeSessionMiddleware().process_response(self.request, self.client.response)
assert response.status_code == 200
def test_success(self):
self.verify_success()
def test_success_from_mobile_web_view(self):
self.request.path = '/xblock/block-v1:org+course+run+type@html+block@block_id'
self.verify_success()
@override_settings(MOBILE_APP_USER_AGENT_REGEXES=[r'open edX Mobile App'])
def test_success_from_mobile_app(self):
self.request.META = {'HTTP_USER_AGENT': 'open edX Mobile App Version 2.1'}
self.verify_success()
def verify_error(self, expected_response_status):
"""
Verifies error path.
"""
self.request.COOKIES[settings.SESSION_COOKIE_NAME] = 'not-a-safe-cookie'
self.request.session = self.client.session
with self.assert_parse_error():
request_response = SafeSessionMiddleware().process_request(self.request)
assert request_response.status_code == expected_response_status
assert self.request.need_to_delete_cookie
self.cookies_from_request_to_response()
with patch('django.http.HttpResponse.delete_cookie') as mock_delete_cookie:
SafeSessionMiddleware().process_response(self.request, self.client.response)
assert {'sessionid', 'edx-jwt-cookie-header-payload'} \
<= {call.args[0] for call in mock_delete_cookie.call_args_list}
def test_error(self):
self.request.META['HTTP_ACCEPT'] = 'text/html'
self.verify_error(302)
@ddt.data(['text/html', 302], ['', 401])
@ddt.unpack
def test_error_with_http_accept(self, http_accept, expected_response):
self.request.META['HTTP_ACCEPT'] = http_accept
self.verify_error(expected_response)
@override_settings(MOBILE_APP_USER_AGENT_REGEXES=[r'open edX Mobile App'])
def test_error_from_mobile_app(self):
self.request.META = {'HTTP_USER_AGENT': 'open edX Mobile App Version 2.1'}
self.verify_error(401)
@override_settings(ENFORCE_SAFE_SESSIONS=False)
def test_warn_on_user_change_before_response(self):
"""
Verifies that when enforcement disabled, warnings are emitted and custom attributes set if
the user changes unexpectedly between request and response.
"""
self.set_up_for_success()
# But then user changes unexpectedly
self.request.user = UserFactory.create()
with self.assert_logged_for_request_user_mismatch(self.user.id, self.request.user.id, 'warning', '/', False):
with patch('openedx.core.djangoapps.safe_sessions.middleware.set_custom_attribute') as mock_attr:
response = SafeSessionMiddleware().process_response(self.request, self.client.response)
assert response.status_code == 200
set_attr_call_args = [call.args for call in mock_attr.call_args_list]
assert ("safe_sessions.user_mismatch", "request-response-mismatch") in set_attr_call_args
def test_enforce_on_user_change_before_response(self):
"""
Copy of test_warn_on_user_change_before_response but with enforcement enabled (default).
The differences should be the status code and the session deletion.
"""
self.set_up_for_success()
assert SafeSessionMiddleware.get_user_id_from_session(self.request) is not None
# But then user changes unexpectedly
self.request.user = UserFactory.create()
with self.assert_logged_for_request_user_mismatch(self.user.id, self.request.user.id, 'warning', '/', False):
with patch('openedx.core.djangoapps.safe_sessions.middleware.set_custom_attribute') as mock_attr:
response = SafeSessionMiddleware().process_response(self.request, self.client.response)
assert response.status_code == 401
assert SafeSessionMiddleware.get_user_id_from_session(self.request) is None # session cleared
set_attr_call_args = [call.args for call in mock_attr.call_args_list]
assert ("safe_sessions.user_mismatch", "request-response-mismatch") in set_attr_call_args
@override_settings(ENFORCE_SAFE_SESSIONS=False)
def test_warn_on_user_change_from_session(self):
"""
Verifies that warnings are emitted and custom attributes set if
the user in the request does not match the user in the session.
"""
self.set_up_for_success()
different_user = UserFactory()
SafeSessionMiddleware.set_user_id_in_session(self.request, different_user)
with self.assert_logged_for_session_user_mismatch(self.user.id, different_user.id, self.request.path,
False):
with patch('openedx.core.djangoapps.safe_sessions.middleware.set_custom_attribute') as mock_attr:
response = SafeSessionMiddleware().process_response(self.request, self.client.response)
assert response.status_code == 200
set_attr_call_args = [call.args for call in mock_attr.call_args_list]
assert ("safe_sessions.user_mismatch", "request-session-mismatch") in set_attr_call_args
@override_settings(ENFORCE_SAFE_SESSIONS=False)
def test_warn_on_user_change_in_both(self):
"""
Verifies that warnings are emitted and custom attributes set if
the user in the initial request does not match the user at response time or the user in the session.
"""
self.set_up_for_success()
different_user = UserFactory()
SafeSessionMiddleware.set_user_id_in_session(self.request, different_user)
self.request.user = UserFactory.create()
with self.assert_logged_for_both_mismatch(self.user.id, different_user.id,
self.request.user.id, self.request.path, False):
with patch('openedx.core.djangoapps.safe_sessions.middleware.set_custom_attribute') as mock_attr:
response = SafeSessionMiddleware().process_response(self.request, self.client.response)
assert response.status_code == 200
set_attr_call_args = [call.args for call in mock_attr.call_args_list]
assert ("safe_sessions.user_mismatch", "request-response-and-session-mismatch") in set_attr_call_args
@patch("openedx.core.djangoapps.safe_sessions.middleware.LOG_REQUEST_USER_CHANGES", True)
@patch("openedx.core.djangoapps.safe_sessions.middleware.set_custom_attribute")
def test_warn_with_verbose_logging(self, mock_set_custom_attribute):
self.set_up_for_success()
self.request.user = UserFactory.create()
with self.assert_logged('SafeCookieData: Changing request user. ', log_level='warning'):
SafeSessionMiddleware().process_response(self.request, self.client.response)
mock_set_custom_attribute.assert_has_calls([call('safe_sessions.user_id_list', '1,2')])
@patch("openedx.core.djangoapps.safe_sessions.middleware.LOG_REQUEST_USER_CHANGES", False)
def test_warn_without_verbose_logging(self):
self.set_up_for_success()
self.request.user = UserFactory.create()
with self.assert_regex_not_logged('SafeCookieData: Changing request user. ', log_level='warning'):
SafeSessionMiddleware().process_response(self.request, self.client.response)
@override_settings(LOG_REQUEST_USER_CHANGE_HEADERS=True)
@patch("openedx.core.djangoapps.safe_sessions.middleware.LOG_REQUEST_USER_CHANGES", True)
@patch("openedx.core.djangoapps.safe_sessions.middleware.cache")
def test_user_change_with_header_logging(self, mock_cache):
self.set_up_for_success()
self.request.user = UserFactory.create()
with self.assert_logged('SafeCookieData: Changing request user. ', log_level='warning'):
SafeSessionMiddleware().process_response(self.request, self.client.response)
# Note: Since the test cache is not retaining its values for some reason, we'll
# simply assert that the cache is set (here) and checked (below).
mock_cache.set_many.assert_called_with(
{
'safe_sessions.middleware.recent_user_change_detected_1': True,
'safe_sessions.middleware.recent_user_change_detected_2': True
}, 300
)
# send successful request; request header should be logged for earlier mismatched user id
self.set_up_for_success()
SafeSessionMiddleware().process_response(self.request, self.client.response)
# Note: The test cache is not returning True because it is not retaining its values
# for some reason. Rather than asserting that we log the header appropriately, we'll
# simply verify that we are checking the cache.
mock_cache.get.assert_called_with('safe_sessions.middleware.recent_user_change_detected_1', False)
@override_settings(LOG_REQUEST_USER_CHANGE_HEADERS=True)
@patch("openedx.core.djangoapps.safe_sessions.middleware.LOG_REQUEST_USER_CHANGES", True)
def test_no_request_user_with_header_logging(self):
"""
Test header logging enabled with request not containing a user object.
Notes:
* In Production, failures happened for some requests that did not have
request.user set, so we test this case here.
* An attempt at creating a unit test for an anonymous user started
failing due to a missing request.session, which never happens in
Production, so this case assumes a working session object.
"""
self.request.session = MagicMock()
del self.request.user
with self.assert_not_logged():
SafeSessionMiddleware().process_response(self.request, self.client.response)
def test_no_warn_on_expected_user_change(self):
"""
Verifies that no warnings is emitted when the user change is expected.
This might happen on a login, for example.
"""
self.set_up_for_success()
# User changes...
new_user = UserFactory.create()
self.request.user = new_user
# ...but so does session, and view sets a flag to say it's OK.
mark_user_change_as_expected(new_user.id)
with self.assert_no_warning_logged():
with patch('openedx.core.djangoapps.safe_sessions.middleware.set_custom_attribute') as mock_attr:
response = SafeSessionMiddleware().process_response(self.request, self.client.response)
assert response.status_code == 200
assert 'safe_sessions.user_mismatch' not in [call.args[0] for call in mock_attr.call_args_list]
class TestTokenObscuring(TestCase):
"""
Test the ability to obscure session IDs.
"""
def test_obscure_none(self):
"""Doesn't break on None input."""
assert obscure_token(None) is None
def test_obscure_vary(self):
"""
Verifies that input and SECRET_KEY both change output.
The "expected" values here are computed and arbitrary; if the
algorithm is updated, the expected values should also be
updated. (Changing the algorithm will invalidate any stored
data, but the docstring explicitly warns not to store these
outputs anyhow.)
"""
with override_settings(SECRET_KEY="FIRST SECRET"):
# Same input twice, same output twice
assert obscure_token('abcdef-123456') == '2d4260b0'
assert obscure_token('abcdef-123456') == '2d4260b0'
# Different input, different output
assert obscure_token('zxcvbnm-000111') == '87978f29'
# Different key, different output
with override_settings(SECRET_KEY="SECOND SECRET"):
assert obscure_token('abcdef-123456') == '7325d529'
@ddt.ddt
class TestTrackRequestUserChanges(TestCase):
"""
Test the function that instruments a request object.
Ensure that we are logging changes to the 'user' attribute and
that the correct messages are written.
"""
def test_initial_user_setting_tracking(self):
request = get_mock_request()
del request.user
track_request_user_changes(request)
request.user = UserFactory.create()
assert "Setting for the first time" in request.debug_user_changes[0]
def test_user_change_logging(self):
request = get_mock_request()
original_user = UserFactory.create()
new_user = UserFactory.create()
request.user = original_user
track_request_user_changes(request)
# Verify that we don't log if set to same as current value.
request.user = original_user
assert len(request.debug_user_changes) == 0
# Verify logging on change.
request.user = new_user
assert len(request.debug_user_changes) == 1
assert f"Changing request user. Originally {original_user.id!r}" in request.debug_user_changes[0]
assert f"will become {new_user.id!r}" in request.debug_user_changes[0]
# Verify change back logged.
request.user = original_user
assert len(request.debug_user_changes) == 2
expected_msg = f"Originally {original_user.id!r}, now {new_user.id!r} and will become {original_user.id!r}"
assert expected_msg in request.debug_user_changes[1]
def test_user_change_with_no_ids(self):
request = get_mock_request()
del request.user
track_request_user_changes(request)
request.user = object()
assert "Setting for the first time, but user has no id" in request.debug_user_changes[0]
request.user = object()
assert len(request.debug_user_changes) == 2
assert "Changing request user but user has no id." in request.debug_user_changes[1]
| eduNEXT/edx-platform | openedx/core/djangoapps/safe_sessions/tests/test_middleware.py | Python | agpl-3.0 | 25,792 |
from optparse import make_option
import traceback
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity'))
interactive = options.get('interactive')
show_traceback = options.get('traceback')
load_initial_data = options.get('load_initial_data')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError as exc:
# This is slightly hackish. We want to ignore ImportErrors
# if the "management" module itself is missing -- but we don't
# want to ignore the exception if the management module exists
# but raises an ImportError for some reason. The only way we
# can do this is to check the text of the exception. Note that
# we're a bit broad in how we check the text, because different
# Python implementations may not use the same text.
# CPython uses the text "No module named management"
# PyPy uses "No module named myproject.myapp.management"
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
db = options.get('database')
connection = connections[db]
cursor = connection.cursor()
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app.__name__.split('.')[-2],
[m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)])
for app in models.get_apps()
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = SortedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
# Create the tables for each model
if verbosity >= 1:
self.stdout.write("Creating tables ...\n")
with transaction.commit_on_success_unless_managed(using=db):
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if verbosity >= 3:
self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
if verbosity >= 1 and sql:
self.stdout.write("Creating table %s\n" % model._meta.db_table)
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
# Send the post_syncdb signal, so individual apps can do whatever they need
# to do at this point.
emit_post_sync_signal(created_models, verbosity, interactive, db)
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
# Install custom SQL for the app (but only if this
# is a model we've just created)
if verbosity >= 1:
self.stdout.write("Installing custom SQL ...\n")
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, self.style, connection)
if custom_sql:
if verbosity >= 2:
self.stdout.write("Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=db):
for sql in custom_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
if show_traceback:
traceback.print_exc()
else:
if verbosity >= 3:
self.stdout.write("No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
if verbosity >= 1:
self.stdout.write("Installing indexes ...\n")
# Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, self.style)
if index_sql:
if verbosity >= 2:
self.stdout.write("Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=db):
for sql in index_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
# Load initial_data fixtures (unless that has been disabled)
if load_initial_data:
call_command('loaddata', 'initial_data', verbosity=verbosity,
database=db, skip_validation=True)
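# Example invocation (illustrative only; the flags shown are the ones declared
# in option_list above, and "default" is simply the usual database alias):
#   python manage.py syncdb --noinput --no-initial-data --database=default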
| postrational/django | django/core/management/commands/syncdb.py | Python | bsd-3-clause | 8,228 |
import pytest
# TODO: use same globals for reverse operations such as add, remove
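# The test data below uses the adjacency-dict representation that these tests
# assign directly to Graph._container: {node: {neighbor: weight, ...}, ...}.
# Each case pairs such a dict with the expected node list and/or edge list
# (edges expressed as (from, to) tuples).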
GRAPHS = [
({},
[],
[]),
({'nodeA': {}},
['nodeA'],
[]),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
['nodeA', 'nodeB'],
[('nodeA', 'nodeB')]),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
['nodeA', 'nodeB'],
[('nodeA', 'nodeB'), ('nodeB', 'nodeA')]),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}},
['nodeA', 'nodeB', 'nodeC'],
[('nodeA', 'nodeB'),
('nodeA', 'nodeC'),
('nodeB', 'nodeA'),
('nodeC', 'nodeA'),
('nodeC', 'nodeC')]),
]
GRAPHS_FOR_NODE_INSERT = [
({},
'nodeN',
{'nodeN': {}}),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}},
'nodeN',
{'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeN': {}}),
({'nodeA': {'nodeA': 'weight', 'nodeB': 'weight'},
'nodeB': {'nodeC': 'weight', 'nodeA': 'weight'}},
'nodeN',
{'nodeA': {'nodeA': 'weight', 'nodeB': 'weight'},
'nodeB': {'nodeC': 'weight', 'nodeA': 'weight'},
'nodeN': {}}),
]
GRAPHS_ADD_EDGE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
"nodeX",
"nodeY",
{'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeX': {'nodeY': 'weight'},
'nodeY': {}}),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
'nodeA',
'nodeB',
{'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}}),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}},
'nodeB',
'nodeC',
{'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight', 'nodeC': 'weight'},
'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}}),
]
GRAPHS_DEL_NODE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeX': {'nodeY': 'weight'},
'nodeY': {}},
'nodeA',
{'nodeB': {},
'nodeX': {'nodeY': 'weight'},
'nodeY': {}}),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
'nodeB',
{'nodeA': {}}),
]
GRAPHS_DEL_EDGE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeA',
'nodeB',
{'nodeA': {},
'nodeB': {}}),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {},
'nodeC': {}},
'nodeA',
'nodeB',
{'nodeA': {'nodeC': 'weight'},
'nodeB': {},
'nodeC': {}})
]
NEIGHBORS = [
({'nodeA': {},
'nodeB': {'nodeA': 'weight'}},
'nodeB',
['nodeA']),
({'nodeA': {},
'nodeB': {'nodeA': 'weight'}},
'nodeA',
[]),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeC': {'nodeA': 'weight'}},
'nodeA',
['nodeB', 'nodeC']),
]
ADJACENT = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeA',
'nodeB',
True),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeB',
'nodeA',
False),
]
ADJACENT_NODES_GONE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeX', 'nodeB'),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeX', 'nodeY'),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeA', 'nodeY'),
]
NODE_TRAVERSAL_BREADTH = [
({'A': {'B': 'weight', 'C': 'weight'},
'B': {'A': 'weight', 'D': 'weight', 'E': 'weight'},
'C': {'A': 'weight', 'F': 'weight', 'G': 'weight'},
'D': {'B': 'weight', 'H': 'weight'},
'E': {'B': 'weight'},
'F': {'C': 'weight'},
'G': {'C': 'weight'},
'H': {'D': 'weight'}},
'A',
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']),
({'A': {'B': 'weight', 'C': 'weight'},
'B': {'C': 'weight', 'D': 'weight'},
'C': {},
'D': {}},
'A',
['A', 'B', 'C', 'D']),
({'a': {}}, 'a', ['a']),
]
NODE_TRAVERSAL_DEPTH = [
({'A': {'B': 'weight', 'E': 'weight'},
"B": {'C': 'weight', 'D': 'weight'},
'E': {},
'C': {},
'D': {}},
'A',
['A', 'E', 'B', 'D', 'C']),
({'A': {'B': 'weight', 'E': 'weight'},
"B": {'C': 'weight', 'D': 'weight'},
'E': {},
'C': {'A': 'weight', 'E': 'weight'},
'D': {}},
'A',
['A', 'E', 'B', 'D', 'C']),
({'a': {'b': 'weight', 'g': 'weight'},
'b': {'c': 'weight'},
'g': {'h': 'weight', 'j': 'weight'},
'c': {'d': 'weight'},
'h': {'i': 'weight'},
'j': {'k': 'weight'},
'd': {'e': 'weight', 'f': 'weight'},
'i': {},
'k': {},
'e': {},
'f': {}},
'a',
['a', 'g', 'j', 'k', 'h', 'i', 'b', 'c', 'd', 'f', 'e']),
({'a': {}}, 'a', ['a']),
]
GET_WEIGHT = [
({'A': {'B': 'weight1', 'E': 'weight2'},
"B": {'C': 'weight3', 'D': 'weight4'},
'E': {},
'C': {},
'D': {}},
'A',
'B',
'weight1',),
({'A': {'B': 'weight1', 'E': 'weight2'},
"B": {'C': 'weight3', 'D': 'weight4'},
'E': {},
'C': {},
'D': {}},
'B',
'C',
'weight3',),
({'A': {'B': 'weight1', 'E': 'weight2'},
"B": {'C': 'weight3', 'D': 'weight4'},
'E': {},
'C': {},
'D': {}},
'B',
'D',
'weight4',),
]
@pytest.fixture(scope='function')
def graph_fixture():
from graph import Graph
return Graph()
@pytest.mark.parametrize(("built_graph", "node", "expected"), GRAPHS_DEL_NODE)
def test_del_node_exists(graph_fixture, built_graph, node, expected):
graph_fixture._container = built_graph
graph_fixture.del_node(node)
assert graph_fixture._container == expected
@pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS)
def test_nodes(graph_fixture, built_graph, node_list, edge_list):
graph_fixture._container = built_graph
result = graph_fixture.nodes()
assert set(result) == set(node_list)
@pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS)
def test_edges(graph_fixture, built_graph, node_list, edge_list):
graph_fixture._container = built_graph
result = graph_fixture.edges()
assert set(edge_list) == set(result)
@pytest.mark.parametrize(("built_graph", "new_node", "expected"),
GRAPHS_FOR_NODE_INSERT)
def test_add_node(graph_fixture, built_graph, new_node, expected):
graph_fixture._container = built_graph
graph_fixture.add_node(new_node)
assert graph_fixture._container == expected
@pytest.mark.parametrize(("built_graph", "n1", "n2", "expected"),
GRAPHS_ADD_EDGE)
def test_add_edge(graph_fixture, built_graph, n1, n2, expected):
graph_fixture._container = built_graph
graph_fixture.add_edge(n1, n2)
assert graph_fixture._container == expected
def test_del_node_not_exists(graph_fixture):
graph_fixture._container = {'nodeA': {'nodeA': 'weight'}, 'nodeB': {}}
with pytest.raises(KeyError):
graph_fixture.del_node('nodeX')
@pytest.mark.parametrize(("built_graph", "node1", "node2", "expected"),
GRAPHS_DEL_EDGE)
def test_del_edge(graph_fixture, built_graph, node1, node2, expected):
graph_fixture._container = built_graph
graph_fixture.del_edge(node1, node2)
assert graph_fixture._container == expected
def test_del_edge_not_exists(graph_fixture):
graph_fixture._container = {'nodeA': {}}
with pytest.raises(ValueError):
graph_fixture.del_edge('nodeA', 'nodeB')
def test_has_node_true(graph_fixture):
graph_fixture._container = {'nodeA': {}}
assert graph_fixture.has_node('nodeA')
def test_has_node_false(graph_fixture):
graph_fixture._container = {'nodeA': {}}
assert not graph_fixture.has_node('nodeB')
@pytest.mark.parametrize(("built_graph", 'node', 'expected'), NEIGHBORS)
def test_neighbors(graph_fixture, built_graph, node, expected):
graph_fixture._container = built_graph
assert set(graph_fixture.neighbors(node)) == set(expected)
def test_neighbors_none(graph_fixture):
graph_fixture._container = {'nodeA': {}}
with pytest.raises(KeyError):
graph_fixture.neighbors('nodeB')
@pytest.mark.parametrize(('built_graph', 'n1', 'n2', 'expected'), ADJACENT)
def test_adjacent(graph_fixture, built_graph, n1, n2, expected):
# if n1, n2 don't exist: raise error
graph_fixture._container = built_graph
assert graph_fixture.adjacent(n1, n2) == expected
@pytest.mark.parametrize(('built_graph', 'n1', 'n2'), ADJACENT_NODES_GONE)
def test_adjacent_not_exists(graph_fixture, built_graph, n1, n2):
# if n1, n2 don't exist: raise error
graph_fixture._container = built_graph
with pytest.raises(KeyError):
graph_fixture.adjacent(n1, n2)
@pytest.mark.parametrize(('built_graph', 'node', 'expected'), NODE_TRAVERSAL_BREADTH)
def test_traverse_breadth(graph_fixture, built_graph, node, expected):
graph_fixture._container = built_graph
assert graph_fixture.breadth_first_traversal(node) == expected
def test_empty_graph_breadth(graph_fixture):
graph_fixture._container = {}
with pytest.raises(IndexError):
graph_fixture.breadth_first_traversal('X')
@pytest.mark.parametrize(('built_graph', 'node', 'expected'), NODE_TRAVERSAL_DEPTH)
def test_traverse_depth(graph_fixture, built_graph, node, expected):
graph_fixture._container = built_graph
assert graph_fixture.depth_first_traversal(node) == expected
def test_traverse_depth_empty(graph_fixture):
graph_fixture._container = {}
with pytest.raises(IndexError):
graph_fixture.depth_first_traversal('node')
@pytest.mark.parametrize(('built_graph', 'n1', 'n2', 'expected'), GET_WEIGHT)
def test_get_weight(graph_fixture, built_graph, n1, n2, expected):
graph_fixture._container = built_graph
assert graph_fixture.get_weight(n1, n2) == expected
| palindromed/data-structures | src/test_graph.py | Python | mit | 10,017 |
import re
from django.utils import translation
import requests
from requests.exceptions import RequestException
import olympia.core.logger
from olympia.amo.celery import task
from olympia.amo.decorators import write
from olympia.files.models import (
File, WebextPermission, WebextPermissionDescription)
from olympia.files.utils import parse_xpi
from olympia.translations.models import Translation
log = olympia.core.logger.getLogger('z.files.task')
@task
@write
def extract_webext_permissions(ids, **kw):
log.info('[%s@%s] Extracting permissions from Files, starting at id: %s...'
% (len(ids), extract_webext_permissions.rate_limit, ids[0]))
files = File.objects.filter(pk__in=ids).no_transforms()
for file_ in files:
try:
log.info('Parsing File.id: %s @ %s' %
(file_.pk, file_.current_file_path))
parsed_data = parse_xpi(file_.current_file_path, check=False)
permissions = parsed_data.get('permissions', [])
# Add content_scripts host matches too.
for script in parsed_data.get('content_scripts', []):
permissions.extend(script.get('matches', []))
if permissions:
log.info('Found %s permissions for: %s' %
(len(permissions), file_.pk))
WebextPermission.objects.update_or_create(
defaults={'permissions': permissions}, file=file_)
        except Exception as err:
log.error('Failed to extract: %s, error: %s' % (file_.pk, err))
WEBEXTPERMS_DESCRIPTION_REGEX = r'^webextPerms\.description\.(.+)=(.+)'
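# Illustrative example (the input line below is hypothetical, not taken from a
# real localization file): a line such as
#   webextPerms.description.bookmarks=Read and modify bookmarks
# matches WEBEXTPERMS_DESCRIPTION_REGEX with group(1) == 'bookmarks' (the
# permission name) and group(2) == 'Read and modify bookmarks' (its description).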
@task
@write
def update_webext_descriptions_all(primary, additional, **kw):
"""primary is a (url, locale) tuple; additional is a list of tuples."""
url, locale = primary
update_webext_descriptions(url, locale)
for url, locale in additional:
update_webext_descriptions(url, locale, create=False)
def update_webext_descriptions(url, locale='en-US', create=True, **kw):
class DummyContextManager(object):
def __enter__(self):
pass
def __exit__(*x):
pass
log.info('Updating webext permission descriptions in [%s] from %s' %
(locale, url))
try:
response = requests.get(url)
response.raise_for_status()
except RequestException as e:
log.warning('Error retrieving %s: %s' % (url, e))
return
# We only need to activate the locale for creating new permission objects.
context = translation.override(locale) if create else DummyContextManager()
with context:
for line in response.text.splitlines():
match = re.match(WEBEXTPERMS_DESCRIPTION_REGEX, line)
if match:
(perm, description) = match.groups()
description = description.replace('%S', u'Firefox')
if create:
log.info(u'Adding permission "%s" = "%s"' %
(perm, description))
WebextPermissionDescription.objects.update_or_create(
name=perm, defaults={'description': description})
else:
log.info(u'Updating permission "%s" = "%s" for [%s]' %
(perm, description, locale))
try:
perm_obj = WebextPermissionDescription.objects.get(
name=perm)
Translation.objects.update_or_create(
id=perm_obj.description_id, locale=locale.lower(),
defaults={'localized_string': description})
except WebextPermissionDescription.DoesNotExist:
log.warning('No "%s" permission found to update with '
'[%s] locale' % (perm, locale))
| harikishen/addons-server | src/olympia/files/tasks.py | Python | bsd-3-clause | 3,907 |
## IMPORT MODULES REQURED FOR FILE ##
import os, sys # OS/System info.
import tty, termios # Used for polling keyboard.
import random, time # Used for fun or pain randomness && timing.
## DEFINE GLOBAL VARIABLES ##
last_key_press_time = time.time() # Time since last key press.
##
## FUNCTION DEFINITION => DO NOTHING
##
## PURPOSE: Mainly a placeholder function. Called when 'nothing' occurs due to button press.
##
## INPUTS: NONE.
##
## RETURNS: NONE.
##
def do_nothing():
# Print notification message that nothing important happened.
print "Nothing To See Here.\r"
# :: EXIT ::
return
##
## FUNCTION DEFINITION => PAIN
##
## PURPOSE: Called when a button press results in pain. Prints a notification and (TODO) toggles the INSERT [LED].
##
## INPUTS: NONE.
##
## RETURNS: NONE.
##
def pain():
# Print notification message that the pain is beginning.
print "Entering The House Of Pain.\r"
## TO DO => TOGGLE STATUS OF INSERT [LED] ##
# :: EXIT ::
return
##
## FUNCTION DEFINITION => FUN
##
## PURPOSE: Called when a button press results in fun. Prints a notification and (TODO) toggles the NUMLOCK [LED].
##
## INPUTS: NONE.
##
## RETURNS: NONE.
##
def fun():
# Print notification message that the fun is going to be attempted.
print "Attempting Fun Times.\r"
## TO DO => TOGGLE STATUS OF NUMLOCK [LED] ##
# :: EXIT ::
return
##
## FUNCTION DEFINITION => FUN OR PAIN
##
## PURPOSE: Randomly determine whether it is time for fun or pain.
##
## INPUTS: NONE.
##
## RETURNS: NONE.
##
def fun_or_pain():
# Declare global variables used in function.
global last_key_press_time
# Declare/Initialize Local Vars.
required_wait_time = 1 # Required time (in seconds) to wait between button presses.
# Get current timestamp.
timestamp = time.time()
# Map functions from random number results to function calls.
# NOTE: to skew/alter probabilities, simply add extra items to list [unique numbers (indexes) with duplicate functions (vars)].
fun_or_pain_chance = {
1 : fun,
2 : pain,
3 : fun,
4 : do_nothing
}
# :: CHECK :: Verify enough time has elapsed between button presses. ::
if (timestamp - last_key_press_time) < required_wait_time:
# Invoke the PAIN.
pain()
else:
# Get random integer value to determine what should be done:
fp = random.randint(fun_or_pain_chance.keys()[0], len(fun_or_pain_chance))
# Execute Fun or Pain.
fun_or_pain_chance[fp]()
# Record current timestamp for reference on next function call.
last_key_press_time = timestamp
# :: EXIT ::
return
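## NOTE (illustrative example, not part of the original script): the comment
## above 'fun_or_pain_chance' describes skewing the odds by adding extra keys
## that map to the same function. Making pain twice as likely as fun, for
## instance, could look like:
##   fun_or_pain_chance = { 1 : fun, 2 : pain, 3 : pain, 4 : do_nothing }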
##
## FUNCTION DEFINITION => POLL KEYBOARD
##
## PURPOSE: Poll keyboard and return value for every keypress.
##
## INPUTS: 'mode_hex' => When True returns HEX value for character found.
##
## RETURNS: Character as ascii || hex [based on flag].
##
def poll_kbd(mode_hex=False):
# Define file descriptor.
fd = sys.stdin.fileno()
	# Record the current file descriptor settings for future reference.
fd_settings_old = termios.tcgetattr(fd)
# Use try-catch-finally method for grabbing data from keypress.
try:
# Change mode of file descriptor to raw.
tty.setraw(sys.stdin.fileno())
# Read in character from standard input.
ch = sys.stdin.read(1)
finally:
# Character read successfully.
# Return file descriptor to original settings.
termios.tcsetattr(fd, termios.TCSADRAIN, fd_settings_old)
# Return value read in.
if mode_hex is True:
return ch.encode('hex')
else:
return ch
# :: EXIT ::
return ""
##
## FUNCTION DEFINITION => MAIN APPLICATION
##
## PURPOSE: Continually poll keyboard and invoke 'fun_or_pain' when a valid key is pressed.
##
## INPUTS: NONE.
##
## RETURNS: NONE.
##
def main():
# Declare variables used in this function.
escape_key = '03'
button_key_sequence_00 = ['1b', '5b', '32', '7e'] # KEY-LIST => INSERT
button_key_sequence_01 = ['1b', '5b', '33', '7e'] # KEY-LIST => DELETE
button_key_index = 0x00 # INDEX INTO KEY-LIST
# Infinite loop for application.
while True:
# Poll for keypress.
key = poll_kbd(True)
# Check to see if 'CTRL+c' [escape] was requested.
if key == escape_key:
break
# :: CHECK :: See if current keypress corresponds to return value from button 00. ::
if button_key_sequence_00[button_key_index] == key:
# :: CHECK :: See if full sequence has been reached for keypress. ::
if (len(button_key_sequence_00) - 1) == button_key_index:
# Key has been found; Reset index.
button_key_index = 0x00
# Time to see if fun time or pain time.
fun_or_pain()
# More remains in key list -> increment index.
else:
button_key_index += 1
# :: CHECK :: See if current keypress corresponds to return value from button 01. ::
elif button_key_sequence_01[button_key_index] == key:
# :: CHECK :: See if full sequence has been reached for keypress. ::
if (len(button_key_sequence_01) - 1) == button_key_index:
# Key has been found; Reset index.
button_key_index = 0x00
# Time to see if fun time or pain time.
fun_or_pain()
# More remains in key list -> increment index.
else:
button_key_index += 1
# Incorrect key pressed -> reset index.
else:
button_key_index = 0x00
# :: EXIT ::
return
## INVOKE MAIN FUNCTION ##
main()
| NegativeK/telehack | schrodengers-boxo-fun-or-pain/src/schrodengers-boxo-fun-or-pain.py | Python | agpl-3.0 | 5,464 |
#!/usr/bin/python
# setup the ability to import modules from a sibling directory
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import optparse
from cryptochallenge import stringmanip, ciphers
def main():
parser = optparse.OptionParser()
options, args = parser.parse_args()
if(stringmanip.isHexString(args[0])):
top3_results = ciphers.reverseOneByteXOR(args[0])
print("top 3 identified results:")
for i in top3_results:
print(i)
print(stringmanip.bytearrayToUTF8Str(top3_results[-1]['likely_res_ba']))
print("winning key:")
print(top3_results[-1]['key_chr'])
else:
print("input is not hex")
if __name__ == '__main__':
main()
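# Example invocation (illustrative; the argument must be a hex-encoded string,
# otherwise the script prints "input is not hex"):
#   python byte-xor-cipher.py <hex-encoded single-byte-XOR ciphertext>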
| dctelf/poisonous-mushroom | scripts/byte-xor-cipher.py | Python | apache-2.0 | 737 |
################################################################################
# Copyright (C) 2015-2020 by Last Run Contributors.
#
# This file is part of Arena of Titans.
#
# Arena of Titans is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Arena of Titans is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Arena of Titans. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from unittest.mock import MagicMock
import pytest
from aot.game.board import Color
from aot.game.trumps import (
CannotBeAffectedByTrumps,
ModifyCardColors,
ModifyCardNumberMoves,
ModifyNumberMoves,
ModifyTrumpDurations,
PreventTrumpAction,
RemoveColor,
Teleport,
TrumpsList,
)
from aot.game.trumps.exceptions import MaxNumberAffectingTrumpsError, TrumpHasNoEffectError
from ...utilities import AnyFunction
@pytest.fixture()
def initiator(player):
return player
@pytest.fixture()
def target(player2):
return player2
@pytest.fixture()
def red_tower_trump():
return RemoveColor(name="Tower", color=Color.RED, cost=4, duration=1, must_target_player=True)
@pytest.fixture()
def red_fortress_trump():
return RemoveColor(
name="Fortress", color=Color.RED, cost=4, duration=2, must_target_player=True
)
@pytest.fixture()
def blizzard_trump():
return ModifyNumberMoves(
delta_moves=-1, duration=1, cost=4, name="Blizzard", must_target_player=True
)
def test_affect_modify_number_moves(initiator, target):
target.modify_number_moves = MagicMock()
trump = ModifyNumberMoves(delta_moves=1, duration=1)
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
target.modify_number_moves.assert_called_once_with(1)
def test_affect_modify_number_moves_negative_delta(initiator, target):
target.modify_number_moves = MagicMock()
trump = ModifyNumberMoves(delta_moves=-1, duration=1)
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
target.modify_number_moves.assert_called_once_with(-1)
def test_affect_modify_card_colors(initiator, target):
target.modify_card_colors = MagicMock()
trump = ModifyCardColors(colors={Color.BLACK}, duration=1)
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
target.modify_card_colors.assert_called_once_with({Color.BLACK}, filter_=AnyFunction())
def test_affect_modify_card_colors_with_filter(target, initiator):
target.modify_card_colors = MagicMock()
trump = ModifyCardColors(colors={Color.BLACK}, card_names=["Queen"], duration=1)
queen = MagicMock()
queen.name = "Queen"
king = MagicMock()
king.name = "King"
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
assert target.modify_card_colors.called
assert target.modify_card_colors.call_args[0][0] == {Color.BLACK}
assert callable(target.modify_card_colors.call_args[1]["filter_"])
filter_ = target.modify_card_colors.call_args[1]["filter_"]
assert filter_(queen)
assert not filter_(king)
def test_affect_modify_card_number_moves(initiator, target):
target.modify_card_number_moves = MagicMock()
trump = ModifyCardNumberMoves(delta_moves=1, duration=1)
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
target.modify_card_number_moves.assert_called_once_with(1, filter_=AnyFunction())
def test_affect_modify_card_number_moves_with_filter_(target, initiator):
target.modify_card_number_moves = MagicMock()
trump = ModifyCardNumberMoves(delta_moves=1, duration=1, card_names=["Queen"])
queen = MagicMock()
queen.name = "Queen"
king = MagicMock()
king.name = "King"
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
assert target.modify_card_number_moves.called
assert target.modify_card_number_moves.call_args[0][0] == 1
assert callable(target.modify_card_number_moves.call_args[1]["filter_"])
filter_ = target.modify_card_number_moves.call_args[1]["filter_"]
assert filter_(queen)
assert not filter_(king)
def test_affect_modify_trump_effects_durations(initiator, target): # noqa: F811
target.modify_trump_effects_durations = MagicMock()
trump = ModifyTrumpDurations(delta_duration=-1, duration=1)
with pytest.raises(TrumpHasNoEffectError):
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
assert not target.modify_trump_effects_durations.called
def test_affect_modify_trump_effects_durations_with_filter_(
initiator, target, red_tower_trump, blizzard_trump
): # noqa: F811
trump = ModifyTrumpDurations(delta_duration=-1, duration=1, trump_names=("Tower",))
tower_effect = red_tower_trump.create_effect(initiator=initiator, target=target, context={})
blizzard_effect = blizzard_trump.create_effect(initiator=initiator, target=target, context={})
target._trump_effects = [
tower_effect,
blizzard_effect,
]
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
assert tower_effect.duration == 0
assert blizzard_effect.duration == 1
assert target._trump_effects == [blizzard_effect]
def test_prevent_trump_action_dont_enable_on_relevant_trump(
player, player2, red_fortress_trump
): # noqa: F811
"""Test that we only prevent the action of proper trumps.
GIVEN: a prevent action trump enabled on 'Tower' trumps to prevent the 'Ram' trump for player.
GIVEN: a Fortress trump that affects player 2
GIVEN: a Ram trump for player 2
WHEN: player 2 plays the Ram trump to cancel the Fortress
THEN: it works.
"""
# Setup player.
prevent_action_trump = PreventTrumpAction(
name="Impassable Trump",
prevent_trumps_to_modify=["Ram"],
enable_for_trumps=["Tower"],
)
player._available_trumps = TrumpsList([red_fortress_trump])
effect = prevent_action_trump.create_effect(initiator=player, target=player, context={})
effect.apply()
fortress = player._available_trumps["Fortress", Color.RED]
player._can_play = True
player.play_trump(fortress, target=player2, context={})
# Setup trump to play.
ram = ModifyTrumpDurations(
name="Ram",
trump_names=("Tower", "Fortress"),
duration=1,
delta_duration=-1,
)
ram.create_effect(initiator=player2, target=player2, context={}).apply()
fortress_effect = player2.trump_effects[0]
assert len(player2.trump_effects) == 1
assert fortress_effect.duration == 1
ram.create_effect(initiator=player2, target=player2, context={}).apply()
assert fortress_effect.duration == 0
assert player2.trump_effects == ()
def test_remove_color(initiator, target): # noqa: F811
target.deck.remove_color_from_possible_colors = MagicMock()
color = Color.RED
trump = RemoveColor(color=color, duration=1)
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
target.deck.remove_color_from_possible_colors.assert_called_once_with(color)
def test_remove_all_colors(initiator, target): # noqa: F811
target.deck.remove_color_from_possible_colors = MagicMock()
trump = RemoveColor(color=Color["ALL"], duration=1)
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
target.deck.remove_color_from_possible_colors.assert_called_once_with(Color["ALL"])
def test_remove_multiple_colors(initiator, target):
target.deck.remove_color_from_possible_colors = MagicMock()
colors = {Color.BLACK, Color.RED}
trump = RemoveColor(colors=colors, duration=1)
effect = trump.create_effect(initiator=initiator, target=target, context={})
effect.apply()
assert target.deck.remove_color_from_possible_colors.called
assert target.deck.remove_color_from_possible_colors.call_count == len(colors)
def test_teleport_no_target_square(board, player): # noqa: F811
player.move = MagicMock()
trump = Teleport(distance=1, color=Color.BLACK)
trump.create_effect(
initiator=player, target=player, context={"board": board, "square": None}
).apply()
assert not player.move.called
def test_teleport_wrong_distance(board, player): # noqa: F811
player.move = MagicMock()
trump = Teleport(distance=1, color=Color.BLACK)
square = board[0, 0]
trump.create_effect(
initiator=player, target=player, context={"board": board, "square": square}
).apply()
assert not player.move.called
def test_teleport_wrong_color(board, player): # noqa: F811
player.move = MagicMock()
trump = Teleport(distance=1, color=Color.BLUE)
square = board[0, 7]
trump.create_effect(
initiator=player, target=player, context={"board": board, "square": square}
).apply()
assert not player.move.called
def test_teleport(board, player): # noqa: F811
player.move = MagicMock()
square = board[6, 8]
trump = Teleport(distance=1, color=square.color)
trump.create_effect(
initiator=player, target=player, context={"board": board, "square": square}
).apply()
player.move.assert_called_once_with(square)
def test_player_can_only_be_affected_by_max_trump_effects_number_trump(
target, initiator
): # noqa: F811
for i in range(target.MAX_NUMBER_AFFECTING_TRUMPS):
trump = RemoveColor(colors=[Color["BLACK"]], duration=1)
initiator.can_play = True
initiator._number_trumps_played = 0
initiator.play_trump(trump, target=target, context={})
assert len(target.trump_effects) == i + 1
trump = RemoveColor(colors=[Color["BLACK"]], duration=1)
with pytest.raises(MaxNumberAffectingTrumpsError):
initiator.can_play = True
initiator._number_trumps_played = 0
initiator.play_trump(trump, target=target, context={})
assert len(target.trump_effects) == target.MAX_NUMBER_AFFECTING_TRUMPS
def test_cannot_be_affected_by_trump_empty_list_names(player, player2): # noqa: F811
trump = CannotBeAffectedByTrumps(trump_names=[])
assert not trump.allow_trump_to_affect(None)
trump = CannotBeAffectedByTrumps(trump_names=None)
assert not trump.allow_trump_to_affect(None)
| arenaoftitans/arena-of-titans-api | tests/game/trumps/test_trumps.py | Python | agpl-3.0 | 10,977 |
from .core import flespi_receiver
from .stdout_handler import stdout_handler_class
from .wialon_retranslator_handler import wialon_retranslator_handler_class
from .aws_iot_handler import aws_iot_handler_class
from .http_handler import http_handler_class
| janbartnitsky/flespi_receiver | flespi_receiver/__init__.py | Python | mit | 254 |
#
# Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().
#
import Shadow
import numpy
from srxraylib.sources import srfunc
import matplotlib
from matplotlib import pylab as plt
import os
matplotlib.rcParams.update({'font.size': 8})
def wiggler_preprocessor(ener_gev=6.0,e_min=5000.0,e_max=5005.0,file_field="",plot_trajectories=0,
shift_x_flag=0,shift_x_value=0.0,shift_betax_flag=0,shift_betax_value=0.0):
(traj, pars) = srfunc.wiggler_trajectory(b_from=1,
inData=file_field,
nPer=1,
nTrajPoints=501,
ener_gev=ener_gev,
per=None,
kValue=None,
trajFile="tmp.traj",
shift_x_flag=shift_x_flag,
shift_x_value=shift_x_value,
shift_betax_flag=shift_betax_flag,
shift_betax_value=shift_betax_value)
data = numpy.loadtxt("tmp.traj",skiprows=15)
fig = plt.figure(1)
fig.add_subplot(221)
plt.plot(data[:,1],data[:,7])
plt.title("Magnetic Field "+file_field)
plt.xlabel("Y [m]")
plt.ylabel("B [T]")
fig.add_subplot(222)
plt.plot(data[:,1],data[:,0])
plt.title("Electron trajectory")
plt.xlabel("Y [m]")
plt.ylabel("X [m]")
fig.add_subplot(223)
plt.plot(data[:,1],data[:,3])
plt.title("Electron velocity")
plt.xlabel("Y [m]")
plt.ylabel("betaX")
fig.add_subplot(224)
plt.plot(data[:,1],data[:,6])
plt.title("Electron curvature")
plt.xlabel("Y [m]")
plt.ylabel("curvature [m^-1]")
if plot_trajectories:
plt.show()
fig.savefig('sw_'+file_field+'.png')
plt.close(fig)
srfunc.wiggler_cdf(traj,
enerMin=e_min,
enerMax=e_max,
enerPoints=1001,
outFile="xshwig.sha",
elliptical=False)
def wiggler_source(ener_gev=6.0,e_min=5000.0,e_max=5005.0,iwrite=1,emittance=0):
beam = Shadow.Beam()
oe0 = Shadow.Source()
oe0.BENER = 6.0
oe0.CONV_FACT = 100.0
oe0.FDISTR = 0
oe0.FILE_TRAJ = b'xshwig.sha'
oe0.FSOUR = 0
oe0.FSOURCE_DEPTH = 0
oe0.F_COLOR = 0
oe0.F_PHOT = 0
oe0.F_WIGGLER = 1
oe0.HDIV1 = 1.0
oe0.HDIV2 = 1.0
oe0.IDO_VX = 0
oe0.IDO_VZ = 0
oe0.IDO_X_S = 0
oe0.IDO_Y_S = 0
oe0.IDO_Z_S = 0
oe0.ISTAR1 = 5676561
oe0.NCOL = 0
oe0.NPOINT = 20000
oe0.NTOTALPOINT = 0
oe0.N_COLOR = 0
oe0.PH1 = e_min
oe0.PH2 = e_max
oe0.POL_DEG = 0.0
oe0.SIGMAY = 0.0
oe0.VDIV1 = 1.0
oe0.VDIV2 = 1.0
oe0.WXSOU = 0.0
oe0.WYSOU = 0.0
oe0.WZSOU = 0.0
if emittance == 0:
oe0.SIGMAX = 0.0
oe0.SIGMAZ = 0.0
oe0.EPSI_X = 0.0
oe0.EPSI_Z = 0.0
oe0.EPSI_DX = 0.0
oe0.EPSI_DZ = 0.0
else:
# oe0.SIGMAX = 0.001373
# oe0.SIGMAZ = 3.64e-04
# oe0.EPSI_X = oe0.SIGMAX * 16.524e-6 # 2.28e-08
# oe0.EPSI_Z = oe0.SIGMAZ * 1.623e-6 # 5e-10
# oe0.EPSI_DX = 65.1
# oe0.EPSI_DZ = -28.55
oe0.SIGMAX = 0.0008757
oe0.SIGMAZ = 0.0001647
oe0.EPSI_X = oe0.SIGMAX * 24.72e-6 # 2.16e-08
oe0.EPSI_Z = oe0.SIGMAZ * 3.036e-6 # 5e-10
oe0.EPSI_DX = 89.4
oe0.EPSI_DZ = -104.8
#Run SHADOW to create the source
if iwrite:
oe0.write("start.00")
beam.genSource(oe0)
if iwrite:
oe0.write("end.00")
beam.write("begin.dat")
return beam,oe0
def bm_source(ener_gev=6.0,e_min=5000.0,e_max=5005.0,iwrite=0,emittance=0):
beam = Shadow.Beam()
oe0 = Shadow.Source()
oe0.BENER = 6.04
if emittance == 0:
oe0.SIGMAX = 0.0
oe0.SIGMAZ = 0.0
oe0.EPSI_X = 0.0
oe0.EPSI_Z = 0.0
oe0.EPSI_DX = 0.0
oe0.EPSI_DZ = 0.0
else:
oe0.SIGMAX = 77.9e-4
oe0.SIGMAZ = 12.9e-4
oe0.EPSI_X = oe0.SIGMAX * 110.9e-6 # 8.6e-07
oe0.EPSI_Z = oe0.SIGMAZ * 0.5e-6 # 6.45e-10
oe0.EPSI_DX = 0.0
oe0.EPSI_DZ = 0.0
oe0.FDISTR = 4
oe0.FSOURCE_DEPTH = 4
oe0.F_COLOR = 3
oe0.F_PHOT = 0
oe0.HDIV1 = 0.001
oe0.HDIV2 = 0.001
oe0.NCOL = 0
oe0.N_COLOR = 0
oe0.PH1 = e_min
oe0.PH2 = e_max
oe0.POL_DEG = 0.0
oe0.R_ALADDIN = 2353
oe0.R_MAGNET = 23.53
oe0.SIGDIX = 0.0
oe0.SIGDIZ = 0.0
oe0.NPOINT = 20000
oe0.SIGMAY = 0.0
oe0.VDIV1 = 1.0
oe0.VDIV2 = 1.0
oe0.WXSOU = 0.0
oe0.WYSOU = 0.0
oe0.WZSOU = 0.0
#Run SHADOW to create the source
if iwrite:
oe0.write("start.00")
beam.genSource(oe0)
if iwrite:
oe0.write("end.00")
beam.write("begin.dat")
return beam,oe0
def focusing_mirror(beam,foc="1:1",grazing_theta_mrad=3.0,iwrite=0):
if foc == "1:1":
dist_p = 3000.0
dist_q = 3000.0
fmirr = 3 # toroid
elif foc == "3:1":
dist_p = 4500.0
dist_q = 1500.0
fmirr = 2 # ellipsoid
else:
raise "Problems..."
oe1 = Shadow.OE()
oe1.DUMMY = 1.0
oe1.FHIT_C = 1
oe1.FMIRR = fmirr
oe1.FWRITE = 3
oe1.RLEN1 = 50.0
oe1.RLEN2 = 50.0
oe1.RWIDX1 = 10.0
oe1.RWIDX2 = 10.0
oe1.T_IMAGE = dist_q
oe1.T_INCIDENCE = 90.0 - grazing_theta_mrad * 1e-3 * 180 / numpy.pi
oe1.T_REFLECTION = oe1.T_INCIDENCE
oe1.T_SOURCE = dist_p
# run optical element 1
print(" Running optical element: %d"%(1))
if iwrite:
oe1.write("start.01")
beam.traceOE(oe1,1)
if iwrite:
oe1.write("end.01")
beam.write("star.01")
return beam,oe1
def focusing_ideal(beam,iwrite=0):
# implements an infinite elliptical mirror at 45 deg.
oe1 = Shadow.OE()
oe1.DUMMY = 1.0
oe1.FMIRR = 2
oe1.FWRITE = 3
oe1.T_IMAGE = 3000.0
oe1.T_INCIDENCE = 45.0
oe1.T_REFLECTION = 45.0
oe1.T_SOURCE = 3000.0
# run optical element 1
print(" Running optical element: %d"%(1))
if iwrite:
oe1.write("start.01")
beam.traceOE(oe1,1)
if iwrite:
oe1.write("end.01")
beam.write("star.01")
return beam,oe1
if __name__ == "__main__":
wigglers = ["2PAcut"] # ["BM","3Pcut","2PAcut","2PBcut","1Pcut","3P","2PA","2PB","1P","0P"] #
energies = [5] # [5,10,20,40,80] #
emittance = 1
# initialize
plot_trajectories = 0
txt_all = ""
txt_latex = ""
plot_index = -1
xrange_1 = [-0.015,0.015]
yrange_1 = [-0.002,0.002]
xrange_3 = xrange_1 # [-0.015,0.015]
yrange_3 = yrange_1 # [-0.01,0.01]
for root_file in wigglers:
if emittance:
title0 = "Finite Emittance; magnetic field: %s"%(root_file)
else:
title0 = "Zero Emittance; magnetic field: %s"%(root_file)
txt_all += "\n == %s ==\n\n"%(title0)
file_field = "SW_"+root_file+".txt"
#plot trajectories
txt_all += "\n\n [%s] \n\n"%('sw_'+file_field+'.png')
for energy_kev in energies:
# start ENERGY loop HERE #######################
e_min = energy_kev * 1000.0
e_max = e_min + e_min/1000.0 # 0.1%bw
if emittance:
title = "Finite Emittance; magnetic field: %s; E=%d keV"%(root_file,energy_kev)
png_label="_withemitt_"
else:
title = "Zero Emittance; magnetic field: %s; E=%d keV"%(root_file,energy_kev)
png_label="_zeroemitt_"
txt_all += "\n === %s ===\n\n"%(title)
#
# source
#
# preprocessor ["BM","3Pcut","2PAcut","2PBcut","1Pcut"]
if root_file == "BM":
pass
#
# first, only the short IDs
#
elif root_file == "1Pcut":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=4,shift_x_flag=4)
elif root_file == "2PAcut":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=1,shift_x_flag=1)
elif root_file == "2PBcut":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=1,shift_x_flag=1)
elif root_file == "3Pcut":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=0,shift_x_flag=1)
#
# now, also with side BMs
#
elif root_file == "0P":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=4,shift_x_flag=4)
elif root_file == "1P":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=4,shift_x_flag=4)
elif root_file == "2PA":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=4,shift_x_flag=4)
elif root_file == "2PB":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=4,shift_x_flag=4)
elif root_file == "3P":
wiggler_preprocessor(e_min=e_min,e_max=e_max,file_field=file_field,
plot_trajectories=plot_trajectories,shift_betax_flag=4,shift_x_flag=4)
else:
raise "Error: root_file not valid"
#shadow source
if root_file == "BM":
beam, oe0 = bm_source(e_min=e_min,e_max=e_max,emittance=emittance)
else:
beam, oe0 = wiggler_source(e_min=e_min,e_max=e_max,emittance=emittance)
tmp0 = oe0.sourcinfo()
# plot divergences
subtitle = "Divergence space"
tkt0 = Shadow.ShadowTools.plotxy_gnuplot(beam,4,6,xrange=[-0.003,0.003],yrange=[-250e-6,250e-6],nolost=1,
ref="Yes",nbins=201,ps=1,viewer="ls ",title="Div space; Units: rad; "+subtitle,
calculate_widths=2)
if tkt0["fw25%m_h"] == None: tkt0["fw25%m_h"] = -1e-4
if tkt0["fw25%m_v"] == None: tkt0["fw25%m_v"] = -1e-4
if tkt0["fwhm_h"] == None: tkt0["fwhm_h"] = -1e-4
if tkt0["fwhm_v"] == None: tkt0["fwhm_v"] = -1e-4
if tkt0["fw75%m_h"] == None: tkt0["fw75%m_h"] = -1e-4
if tkt0["fw75%m_v"] == None: tkt0["fw75%m_v"] = -1e-4
txt_all += "\n ==== %s ====\n\n"%(subtitle)
txt_all += "\n\n"
txt_all += " | FW25M | H: %6.3f urad | V: %6.3f urad \n"%(1e6*tkt0["fw25%m_h"],1e6*tkt0["fw25%m_v"])
txt_all += " | FWHM | H: %6.3f urad | V: %6.3f urad \n"%(1e6*tkt0["fwhm_h"], 1e6*tkt0["fwhm_v"])
txt_all += " | FW75M | H: %6.3f urad | V: %6.3f urad \n"%(1e6*tkt0["fw75%m_h"],1e6*tkt0["fw75%m_v"])
plot_index += 1
file_png = "sw%s%000d.png"%(png_label,plot_index)
os.system("convert -rotate 90 plotxy.ps %s"%(file_png))
txt_all += "\n\n [%s] \n\n"%(file_png)
# plot top view
subtitle = "Emission intensity - electron trajectory (top view)"
txt_all += "\n ==== %s ====\n\n"%(subtitle)
txt_all += "\n\n"
Shadow.ShadowTools.plotxy_gnuplot(beam,2,1,nbins=201,ps=1,viewer="ls ",title="Units=cm; %s"%subtitle)
plot_index += 1
file_png = "sw%s%000d.png"%(png_label,plot_index)
os.system("convert -rotate 90 plotxy.ps %s"%(file_png))
txt_all += "\n\n [%s] \n\n"%(file_png)
#
# ideal focusing
#
beam,oe1 = focusing_ideal(beam)
subtitle = "Ideal 1:1 focusing"
# plot divergences
# xrange=[-500e-4/4,500e-4/4],yrange=[-500e-4/16,500e-4/16]
tkt1 = Shadow.ShadowTools.plotxy_gnuplot(beam,1,3,xrange=xrange_1, yrange=yrange_1,
nolost=1,ref="Yes",nbins=201,ps=1,viewer="ls ",
title="Real space; Units=cm; %s"%subtitle,
calculate_widths=2)
if tkt1["fw25%m_h"] == None: tkt1["fw25%m_h"] = -1e-4
if tkt1["fw25%m_v"] == None: tkt1["fw25%m_v"] = -1e-4
if tkt1["fwhm_h"] == None: tkt1["fwhm_h"] = -1e-4
if tkt1["fwhm_v"] == None: tkt1["fwhm_v"] = -1e-4
if tkt1["fw75%m_h"] == None: tkt1["fw75%m_h"] = -1e-4
if tkt1["fw75%m_v"] == None: tkt1["fw75%m_v"] = -1e-4
txt_all += "\n ==== %s ====\n\n"%(subtitle)
txt_all += "\n\n"
txt_all += " | FW25M | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt1["fw25%m_h"],1e4*tkt1["fw25%m_v"])
txt_all += " | FWHM | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt1["fwhm_h"], 1e4*tkt1["fwhm_v"])
txt_all += " | FW75M | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt1["fw75%m_h"],1e4*tkt1["fw75%m_v"])
plot_index += 1
file_png = "sw%s%000d.png"%(png_label,plot_index)
os.system("convert -rotate 90 plotxy.ps %s"%(file_png))
txt_all += "\n\n [%s] \n\n"%(file_png)
#
# 1:1 focusing
#
beam = None
if root_file == "BM":
beam, oe0 = bm_source(e_min=e_min,e_max=e_max,emittance=emittance)
else:
beam, oe0 = wiggler_source(e_min=e_min,e_max=e_max,emittance=emittance)
beam,oe1 = focusing_mirror(beam,foc="1:1",grazing_theta_mrad=3.0)
subtitle = "Toroid 1:1 focusing"
tkt2 = Shadow.ShadowTools.plotxy_gnuplot(beam,1,3,xrange=xrange_1, yrange=yrange_1,
nolost=1,ref="Yes",nbins=301,ps=1,viewer="ls ",
title="Real space; Units=cm; %s"%subtitle,
calculate_widths=2)
if tkt2["fw25%m_h"] == None: tkt2["fw25%m_h"] = -1e-4
if tkt2["fw25%m_v"] == None: tkt2["fw25%m_v"] = -1e-4
if tkt2["fwhm_h"] == None: tkt2["fwhm_h"] = -1e-4
if tkt2["fwhm_v"] == None: tkt2["fwhm_v"] = -1e-4
if tkt2["fw75%m_h"] == None: tkt2["fw75%m_h"] = -1e-4
if tkt2["fw75%m_v"] == None: tkt2["fw75%m_v"] = -1e-4
txt_all += "\n ==== %s ====\n\n"%(subtitle)
txt_all += "\n\n"
txt_all += " | FW25M | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt2["fw25%m_h"],1e4*tkt2["fw25%m_v"])
txt_all += " | FWHM | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt2["fwhm_h"], 1e4*tkt2["fwhm_v"])
txt_all += " | FW75M | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt2["fw75%m_h"],1e4*tkt2["fw75%m_v"])
plot_index += 1
file_png = "sw%s%000d.png"%(png_label,plot_index)
os.system("convert -rotate 90 plotxy.ps %s"%(file_png))
txt_all += "\n\n [%s] \n\n"%(file_png)
#
#
# 3:1 focusing
#
beam = None
if root_file == "BM":
beam, oe0 = bm_source(e_min=e_min,e_max=e_max,emittance=emittance)
else:
beam, oe0 = wiggler_source(e_min=e_min,e_max=e_max,emittance=emittance)
beam,oe1 = focusing_mirror(beam,foc="3:1",grazing_theta_mrad=3.0)
subtitle = "Ellipsoid 3:1 focusing"
# xrange=[-500e-4/2,500e-4/2],yrange=[-500e-4/4,500e-4/4]
tkt3 = Shadow.ShadowTools.plotxy_gnuplot(beam,1,3, xrange=xrange_3, yrange=yrange_3,
nolost=1,ref="Yes",nbins=151,ps=1,viewer="ls ",
title="Real space; Units=cm; %s"%subtitle,
calculate_widths=2)
if tkt3["fw25%m_h"] == None: tkt3["fw25%m_h"] = -1e-4
if tkt3["fw25%m_v"] == None: tkt3["fw25%m_v"] = -1e-4
if tkt3["fwhm_h"] == None: tkt3["fwhm_h"] = -1e-4
if tkt3["fwhm_v"] == None: tkt3["fwhm_v"] = -1e-4
if tkt3["fw75%m_h"] == None: tkt3["fw75%m_h"] = -1e-4
if tkt3["fw75%m_v"] == None: tkt3["fw75%m_v"] = -1e-4
txt_all += "\n ==== %s ====\n\n"%(subtitle)
txt_all += "\n\n"
txt_all += " | FW25M | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt3["fw25%m_h"],1e4*tkt3["fw25%m_v"])
txt_all += " | FWHM | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt3["fwhm_h"], 1e4*tkt3["fwhm_v"])
txt_all += " | FW75M | H: %6.3f um | V: %6.3f um \n"%(1e4*tkt3["fw75%m_h"],1e4*tkt3["fw75%m_v"])
plot_index += 1
file_png = "sw%s%000d.png"%(png_label,plot_index)
os.system("convert -rotate 90 plotxy.ps %s"%(file_png))
txt_all += "\n\n [%s] \n\n"%(file_png)
if root_file != "0P":
txt_latex += "%d & %s & %d & %d & %5.2f & %5.2f & %5.2f & %5.2f & %5.2f & %5.2f %s%s \n"%(energy_kev,root_file,\
1e6*tkt0["fwhm_h"],1e6*tkt0["fwhm_v"],\
1e4*tkt1["fwhm_h"],1e4*tkt1["fwhm_v"],\
1e4*tkt2["fwhm_h"],1e4*tkt2["fwhm_v"],\
1e4*tkt3["fwhm_h"],1e4*tkt3["fwhm_v"],\
"\\","\\")
txt_latex += "\hline \n"
#
# dump results
#
if emittance:
file_latex = "table2.txt"
else:
file_latex = "table1.txt"
f = open(file_latex,"w")
f.write(txt_latex)
f.close()
print("File written to disk: %s"%(file_latex))
if emittance:
root_file_out = 'short_wigglers_with_emittance'
else:
root_file_out = 'short_wigglers_no_emittance'
file_out = root_file_out+'.t2t'
f = open(file_out,"w")
f.write(txt_all)
f.close()
print("File written to disk: ",file_out)
os.system("./txt2tags -t html --toc --enum-title %s"%file_out)
print("All done.")
print(tmp0)
# if plot_trajectories: plt.show()
#os.system("open %s.html"%root_file_out)
| srio/shadow3-scripts | short-wigglers/short_wigglers.py | Python | mit | 19,246 |
#
# DAPLink Interface Firmware
# Copyright (c) 2016-2016, ARM Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import struct
import numbers
import time
import usb.util
class USBMsd(object):
"""Wrapper class for a MSD usb device"""
# Bulk only transport documented in
# "Universal Serial Bus Mass Storage Class"
# SCSI commands documented in "SCSI Commands Reference Manual" by Seagate
CLASS_MSD = 0x8
# Write 10
# Read 10
# Test unit ready
# Request Sense
# dCBWSignature
# dCBWTag
# dCBWDataTransferLength
# bmCBWFlags
# bCBWLUN
# bCBWCBLength
FMT_CBW = "<IIIBBB"
# dCSWSignature
# dCSWTag
# dCSWDataResidue
# bCSWStatus
FMT_CSW = "<IIIB"
CSW_STATUS_PASSED = 0
CSW_STATUS_FAILED = 1
CSW_STATUS_PHASE_ERROR = 2
class SCSIError(Exception):
def __init__(self, error):
Exception.__init__(self)
self.value = error
# Some SCSI commands
# Value Keil middleware define Seagate name
# 0x12 SCSI_INQUIRY INQUIRY
# 0x23 SCSI_READ_FORMAT_CAPACITIES Missing
# 0x25 SCSI_READ_CAPACITY READ CAPACITY (10)
# 0x28 SCSI_READ10 READ (10)
# 0x1A SCSI_MODE_SENSE6 MODE SENSE (6)
# 0x00 SCSI_TEST_UNIT_READY TEST UNIT READY
# 0x2A SCSI_WRITE10 WRITE (10)
# 0x03 SCSI_REQUEST_SENSE REQUEST SENSE
# 0x1E SCSI_MEDIA_REMOVAL Missing
def __init__(self, device):
self._dev = device
self._if = None
self.ep_in = None
self.ep_out = None
self._locked = False
self._cbw_tag = 0
self.timeout = 60 * 1000
# delays are for testing only
self.delay_cbw_to_data = 0
self.delay_data_to_csw = 0
# Find interface
for interface in device.get_active_configuration():
if interface.bInterfaceClass == USBMsd.CLASS_MSD:
assert self._if is None
self._if = interface
assert self._if is not None
# Find endpoints
for endpoint in self._if:
if endpoint.bEndpointAddress & 0x80:
assert self.ep_in is None
self.ep_in = endpoint
else:
assert self.ep_out is None
self.ep_out = endpoint
assert self.ep_in is not None
assert self.ep_out is not None
def lock(self):
"""Acquire exclisive access to MSD"""
assert not self._locked
num = self._if.bInterfaceNumber
try:
if self._dev.is_kernel_driver_active(num):
self._dev.detach_kernel_driver(num)
except NotImplementedError:
pass
except usb.core.USBError:
pass
usb.util.claim_interface(self._dev, num)
self._locked = True
def unlock(self):
"""Release exclusive access to MSD"""
assert self._locked
num = self._if.bInterfaceNumber
usb.util.release_interface(self._dev, num)
try:
self._dev.attach_kernel_driver(num)
except NotImplementedError:
pass
except usb.core.USBError:
pass
self._locked = False
def scsi_read10(self, lba, block_count):
"""Send the SCSI read 10 command and return the data read"""
block_size = 512
cbwcb = bytearray(10)
cbwcb[0] = 0x28
cbwcb[2] = (lba >> (8 * 3)) & 0xFF
cbwcb[3] = (lba >> (8 * 2)) & 0xFF
cbwcb[4] = (lba >> (8 * 1)) & 0xFF
cbwcb[5] = (lba >> (8 * 0)) & 0xFF
cbwcb[7] = (block_count >> (8 * 1)) & 0xFF
cbwcb[8] = (block_count >> (8 * 0)) & 0xFF
ret, data = self._msd_transfer(cbwcb, 0, block_count * block_size)
if ret != self.CSW_STATUS_PASSED:
raise self.SCSIError(ret)
return data
def scsi_write10(self, lba, data):
"""Send the SCSI write 10 command"""
block_size = 512
assert len(data) % block_size == 0
block_count = (len(data) + (block_size - 1)) // block_size
cbwcb = bytearray(10)
cbwcb[0] = 0x2A
cbwcb[2] = (lba >> (8 * 3)) & 0xFF
cbwcb[3] = (lba >> (8 * 2)) & 0xFF
cbwcb[4] = (lba >> (8 * 1)) & 0xFF
cbwcb[5] = (lba >> (8 * 0)) & 0xFF
cbwcb[7] = (block_count >> (8 * 1)) & 0xFF
cbwcb[8] = (block_count >> (8 * 0)) & 0xFF
ret, _ = self._msd_transfer(cbwcb, 0, data)
if ret != self.CSW_STATUS_PASSED:
raise self.SCSIError(ret)
def scsi_test_unit_ready(self):
"""Send the SCSI test unit ready command and return status"""
cbwcb = bytearray(10)
cbwcb[0] = 0
ret, _ = self._msd_transfer(cbwcb, 0)
return ret
def _msd_transfer(self, cbwcb, lun, size_or_data=None):
"""Perform a bulk only transfer"""
assert self._locked
assert 1 <= len(cbwcb) <= 16
# Increment packet tag
transfer_tag = self._cbw_tag
self._cbw_tag = (self._cbw_tag + 1) & 0xFFFFFFFF
# None means data size of zero
if size_or_data is None:
size_or_data = 0
in_transfer = isinstance(size_or_data, numbers.Number)
transfer_size = (size_or_data if in_transfer else len(size_or_data))
assert in_transfer or len(size_or_data) > 0
# Phase - Command transport
cbw_signature = 0x43425355
cbw_tag = transfer_tag
cbw_data_transfer_length = transfer_size
cbw_flags = (1 << 7) if in_transfer else 0
cbw_lun = lun
cbw_length = len(cbwcb)
params = [cbw_signature, cbw_tag, cbw_data_transfer_length,
cbw_flags, cbw_lun, cbw_length]
cbw = struct.pack(self.FMT_CBW, *params)
pad_size = 16 - len(cbwcb)
payload = cbw + cbwcb + bytearray(pad_size)
self.ep_out.write(payload)
if self.delay_cbw_to_data != 0:
time.sleep(self.delay_cbw_to_data)
# Phase - Data Out or Data In (Optional)
data = None
if transfer_size > 0:
endpoint = self.ep_in if in_transfer else self.ep_out
try:
if in_transfer:
data = self.ep_in.read(transfer_size, self.timeout)
else:
self.ep_out.write(size_or_data, self.timeout)
except usb.core.USBError:
endpoint.clear_halt()
if self.delay_data_to_csw != 0:
time.sleep(self.delay_data_to_csw)
# Phase - Status Transport
csw = self.ep_in.read(13, self.timeout)
csw_signature, csw_tag, csw_data_residue, csw_status = \
struct.unpack(self.FMT_CSW, csw)
assert csw_signature == 0x53425355
assert csw_tag == transfer_tag
#TODO - check residue
return (csw_status, data)
class Struct(object):
"""Base class for a C structure"""
def __init__(self, name, structure, data):
field_list = [field[0] for field in structure]
fmt_list = [field[1] for field in structure]
format_str = "<" + "".join(fmt_list)
struct_size = struct.calcsize(format_str)
value_list = struct.unpack(format_str, data[:struct_size])
value_dict = {}
        # Use a distinct loop variable so the struct's own name is not shadowed.
        for field_name, value in zip(field_list, value_list):
            value_dict[field_name] = value
        self.name = name
self.format_str = format_str
self.field_list = field_list
self.value_dict = value_dict
self.size = struct_size
def __getitem__(self, key):
return self.value_dict[key]
def __setitem__(self, key, value):
self.value_dict[key] = value
def __str__(self):
desc = ""
desc += self.name + ":" + os.linesep
for field in self.field_list:
value = self.value_dict[field]
if isinstance(value, bytes):
value = list(bytearray(value))
desc += (" %s=%s" + os.linesep) % (field, value)
return desc
def pack(self):
"""Return a byte representation of this structure"""
value_list = []
for field in self.field_list:
value_list.append(self.value_dict[field])
return struct.pack(self.format_str, *value_list)
class MBR(Struct):
"""Wrapper class for a FAT MBR"""
STRUCTURE = (
("BS_jmpBoot", "3s"),
("BS_OEMName", "8s"),
("BPB_BytsPerSec", "H"),
("BPB_SecPerClus", "B"),
("BPB_RsvdSecCnt", "H"),
("BPB_NumFATs", "B"),
("BPB_RootEntCnt", "H"),
("BPB_TotSec16", "H"),
("BPB_Media", "B"),
("BPB_FATSz16", "H"),
("BPB_SecPerTrk", "H"),
("BPB_NumHeads", "H"),
("BPB_HiddSec", "L"),
("BPB_TotSec32", "L"),
)
def __init__(self, data, sector=None):
Struct.__init__(self, "MBR", self.STRUCTURE, data)
self.sector = sector
class DirectoryEntry(Struct):
"""Wrapper class for a FAT DirectoryEntry"""
STRUCTURE = (
("DIR_Name", "11s"),
("DIR_Attr", "B"),
("DIR_NTRes", "B"),
("DIR_CrtTimeTenth", "B"),
("DIR_CrtTime", "H"),
("DIR_CrtDate", "H"),
("DIR_LstAccDate", "H"),
("DIR_FstClusHI", "H"),
("DIR_WrtTime", "H"),
("DIR_WrtDate", "H"),
("DIR_FstClusLO", "H"),
("DIR_FileSize", "L"),
)
def __init__(self, data):
Struct.__init__(self, "DirectoryEntry", self.STRUCTURE, data)
class Directory(object):
"""Wrapper class for a FAT Directory"""
ENTRY_SIZE = 32
def __init__(self, entry_count, data, sector=None):
directory_list = []
for i in range(entry_count):
start = i * self.ENTRY_SIZE
dir_data = data[start:start + self.ENTRY_SIZE]
entry = DirectoryEntry(dir_data)
directory_list.append(entry)
self.directory_list = directory_list
self.sector = sector
def __iter__(self):
return iter(self.directory_list)
def __getitem__(self, key):
return self.directory_list[key]
def find_free_entry_index(self):
"""Find a free index in this Directory or return None"""
for idx, directory in enumerate(self.directory_list):
name_data = bytearray(directory["DIR_Name"])
if name_data[0] in (0x00, 0xE5):
return idx
return None
def pack(self):
"""Return a byte a Directory"""
data = bytearray()
for directory in self.directory_list:
data.extend(directory.pack())
return data
class Fat(object):
"""Wrapper class for a FAT filesystem on a SCSI device"""
SECTOR_SIZE = 512
CLUSTER_SIZE = 4 * 1024
def __init__(self, msd):
self.msd = msd
self.reload()
def reload(self):
"""Reload all internal data of this Fat filesystem"""
# Read MBR
mbr_data = self.msd.scsi_read10(0, 1)
mbr = MBR(mbr_data, 0)
# Read in the root directory
root_dir_sec = (mbr["BPB_RsvdSecCnt"] +
(mbr["BPB_NumFATs"] * mbr["BPB_FATSz16"]))
sec_count = (mbr["BPB_RootEntCnt"] * 32 + 512 - 1) // 512
root_dir_data = self.msd.scsi_read10(root_dir_sec, sec_count)
root_dir = Directory(mbr["BPB_RootEntCnt"], root_dir_data,
root_dir_sec)
self.mbr = mbr
self.root_dir = root_dir
| sg-/DAPLink | test/usb_msd.py | Python | apache-2.0 | 12,170 |
# toontown.coghq.BossLobbyGui
from panda3d.core import TextNode, Vec4
from direct.gui.DirectGui import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
class BossLobbyGui(DirectFrame):
class InitialFrame(DirectFrame):
frame = 0
class LobbySelection(DirectButton):
def __init__(self, parent, **kw):
optiondefs = (('relief', None, None),
('image_scale', 0.55, None),
('text_pos', (0.3, -0.0225), None),
('text_scale', 0.075, None))
self.defineoptions(kw, optiondefs)
DirectButton.__init__(self, relief=None)
self.initialiseoptions(BossLobbyGui.InitialFrame.LobbySelection)
return
def __init__(self, parent, callback, **kw):
optiondefs = (('relief', None, None),
('state', DGG.NORMAL, None),
('image', DGG.getDefaultDialogGeom(), None),
('image_scale', (1.0, 1.0, 0.75), None),
('image_color', ToontownGlobals.GlobalDialogColor, None),
('pos', (0, 0, 0), None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(BossLobbyGui.InitialFrame)
self.callback = callback
self.selection = -1
self.load()
return
def destroy(self):
if hasattr(self, 'title') and self.title:
self.title.destroy()
del self.title
if hasattr(self, 'buttons') and len(self.buttons):
for button in self.buttons:
button.destroy()
del self.buttons
if hasattr(self, 'okButton') and self.okButton:
self.okButton.destroy()
del self.okButton
if hasattr(self, 'cancelButton') and self.cancelButton:
self.cancelButton.destroy()
del self.cancelButton
DirectFrame.destroy(self)
def load(self):
empty = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
self.emptyList = (empty.find('**/minnieCircle'), empty.find('**/minnieCircle'), empty.find('**/minnieCircle'))
okImageList = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
cancelImageList = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
empty.removeNode()
buttons.removeNode()
self.title = DirectLabel(parent=self, relief=None, text='Select a Lobby', textMayChange=1, text_scale=0.1, pos=(0, 0, 0.25))
self.okButton = DirectButton(parent=self, relief=None, image=okImageList, pos=(-0.1, 0, -0.275), command=self.nextFrame, extraArgs=[True])
self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, pos=(0.1, 0, -0.275), command=self.nextFrame, extraArgs=[False])
self.buttons = []
public = BossLobbyGui.InitialFrame.LobbySelection(self, image=self.emptyList, pos=(-0.35, 0, 0.075), text='Public', command=self.setSelection, extraArgs=[0])
private = BossLobbyGui.InitialFrame.LobbySelection(self, image=self.emptyList, pos=(-0.3475, 0, -0.075), text='Private', command=self.setSelection, extraArgs=[1])
self.buttons.extend([public, private])
return
def setSelection(self, buttonId):
newSelection = self.buttons[buttonId]
if newSelection:
for button in self.buttons:
button.setColor(1, 1, 1, 1)
newSelection.setColor(0, 1, 0, 1)
self.selection = buttonId
def getSelection(self):
return self.selection
def nextFrame(self, status):
if status and self.getSelection() >= 0:
options = {'lobbyType': self.getSelection()}
self.callback(self.frame + 1, options)
else:
self.callback(-1)
class SecondaryFrame(DirectFrame):
frame = 1
class LobbyList(DirectScrolledList):
def __init__(self, parent, **kw):
buttons = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
arrowGui = (buttons.find('**/tt_t_gui_mat_arrowUp'), buttons.find('**/tt_t_gui_mat_arrowDown'), buttons.find('**/tt_t_gui_mat_arrowDisabled'))
buttons.removeNode()
optiondefs = (('relief', None, None),
('pos', (-0.375, 0, -0.045), None),
('numItemsVisible', 4, None),
('forceHeight', 0.12, None),
('itemFrame_relief', DGG.SUNKEN, None),
('itemFrame_pos', (0, 0, 0), None),
('itemFrame_scale', 1.0, None),
('itemFrame_borderWidth', (0.015, 0.015), None),
('itemFrame_frameSize', (-0.325, 0.225, -0.325, 0.2), None),
('itemFrame_frameColor', (0.85, 0.95, 1, 1), None),
('decButton_image', arrowGui, None),
('decButton_relief', None, None),
('decButton_pos', (0.31, 0, 0.025), None),
('decButton_hpr', (0, 0, -90), None),
('decButton_scale', 0.5, None),
('incButton_image', arrowGui, None),
('incButton_relief', None, None),
('incButton_pos', (0.31, 0, -0.175), None),
('incButton_hpr', (0, 0, 90), None),
('incButton_scale', 0.5, None))
self.defineoptions(kw, optiondefs)
DirectScrolledList.__init__(self, relief=None)
self.initialiseoptions(BossLobbyGui.SecondaryFrame.LobbyList)
return
class LobbyListItem(DirectFrame):
def __init__(self, parent, itemText, callback, **kw):
optiondefs = (('relief', None, None), ('frameColor', (0.85, 0.95, 1, 1), None), ('frameSize', (-0.31, 0.21, 0.055, 0.185), None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(BossLobbyGui.SecondaryFrame.LobbyListItem)
self.button = DirectButton(parent=self, relief=None, text=itemText, text_align=TextNode.ALeft, text_fg=Vec4(0, 0, 0, 1), text3_fg=(0.4, 0.8, 0.4, 1), text1_bg=(1, 1, 0, 1), text2_bg=(0.5, 0.9, 1, 1), pos=(-0.28, 0, 0.105), scale=0.065, command=callback, extraArgs=[itemText])
return
def destroy(self):
if hasattr(self, 'button') and self.button:
self.button.destroy()
DirectFrame.destroy(self)
class LobbyEntry(DirectEntry):
def __init__(self, parent, **kw):
optiondefs = (('relief', DGG.SUNKEN, None),
('borderWidth', (0.25, 0.25), None),
('pos', (-0.675, 0, 0.285), None),
('scale', (0.05, 0.055, 0.055), None),
('numLines', 1, None),
('focus', 1, None),
('frameColor', (0.85, 0.95, 1, 1), None))
self.defineoptions(kw, optiondefs)
DirectEntry.__init__(self, relief=None)
self.initialiseoptions(BossLobbyGui.SecondaryFrame.LobbyEntry)
return
def __init__(self, parent, callback, **kw):
optiondefs = (('relief', None, None),
('state', DGG.NORMAL, None),
('image', DGG.getDefaultDialogGeom(), None),
('image_scale', (1.6, 1.0, 1.3), None),
('image_color', ToontownGlobals.GlobalDialogColor, None),
('pos', (0, 0, 0), None))
self.defineoptions(kw, optiondefs)
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(BossLobbyGui.SecondaryFrame)
self.callback = callback
self.items = []
self.selection = None
self.friendsOnly = False
self.laffLimit = False
self.lobbyName = None
self.isCreating = False
self.load()
return
def destroy(self):
if hasattr(self, 'titleLeft') and self.titleLeft:
self.titleLeft.destroy()
del self.titleLeft
if hasattr(self, 'lobbies') and self.lobbies:
self.lobbies.destroy()
del self.lobbies
if hasattr(self, 'entry') and self.entry:
self.entry.destroy()
del self.entry
if hasattr(self, 'cancelButton') and self.cancelButton:
self.cancelButton.destroy()
del self.cancelButton
if hasattr(self, 'nextButton') and self.nextButton:
self.nextButton.destroy()
del self.nextButton
if hasattr(self, 'nameLabel') and self.nameLabel:
self.nameLabel.destroy()
del self.nameLabel
if hasattr(self, 'nameEntry') and self.nameEntry:
self.nameEntry.destroy()
del self.nameEntry
if hasattr(self, 'friendLabel') and self.friendLabel:
self.friendLabel.destroy()
del self.friendLabel
if hasattr(self, 'friendCheckbox') and self.friendCheckbox:
self.friendCheckbox.destroy()
del self.friendCheckbox
if hasattr(self, 'laffLabel') and self.laffLabel:
self.laffLabel.destroy()
del self.laffLabel
if hasattr(self, 'laffCheckbox') and self.laffCheckbox:
self.laffCheckbox.destroy()
del self.laffCheckbox
DirectFrame.destroy(self)
def load(self):
empty = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
buttons = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
cancelImageList = (buttons.find('**/tt_t_gui_mat_closeUp'), buttons.find('**/tt_t_gui_mat_closeDown'), buttons.find('**/tt_t_gui_mat_closeDown'))
nextImageList = (buttons.find('**/tt_t_gui_mat_nextUp'), buttons.find('**/tt_t_gui_mat_nextDown'), buttons.find('**/tt_t_gui_mat_nextDown'))
emptyList = (empty.find('**/minnieCircle'), empty.find('**/minnieCircle'), empty.find('**/minnieCircle'))
empty.removeNode()
buttons.removeNode()
self.titleLeft = DirectLabel(parent=self, relief=None, text='Select a Lobby', textMayChange=1, text_scale=0.08, pos=(-0.435, 0, 0.475))
self.titleRight = DirectLabel(parent=self, relief=None, text='Create a Lobby', textMayChange=1, text_scale=0.08, pos=(0.39, 0, 0.475))
self.lobbies = BossLobbyGui.SecondaryFrame.LobbyList(self)
self.entry = BossLobbyGui.SecondaryFrame.LobbyEntry(self, command=self.loadItemsToList)
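        # the hard-coded entries below look like placeholder lobby names; a live build would
        # presumably populate this list from the server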
self.items = ['Loudrob',
'Jake',
'Voltage',
'Daniel',
'Mel']
self.nameLabel = DirectLabel(parent=self, relief=None, text='Name:', text_scale=0.06, pos=(0.125, 0, 0.285))
self.nameEntry = BossLobbyGui.SecondaryFrame.LobbyEntry(self, command=self.setLobbyName, pos=(0.27, 0, 0.285), width=9)
self.friendLabel = DirectLabel(parent=self, relief=None, text='Friends Only?', text_scale=0.06, pos=(0.221, 0, 0.085))
self.friendCheckbox = DirectButton(parent=self, relief=None, image=emptyList, pos=(0.62, 0, 0.095), scale=0.55, color=(1, 0, 0, 1), command=self.toggleFriendsOnly)
self.laffLabel = DirectLabel(parent=self, relief=None, text='70+ Laff Only?', text_scale=0.06, pos=(0.251, 0, -0.115))
self.laffCheckbox = DirectButton(parent=self, relief=None, image=emptyList, pos=(0.62, 0, -0.105), scale=0.55, color=(1, 0, 0, 1), command=self.toggleLaffLimit)
self.cancelButton = DirectButton(parent=self, relief=None, image=cancelImageList, pos=(-0.65, 0, -0.535), scale=0.57, command=self.nextFrame, extraArgs=[False])
self.nextButton = DirectButton(parent=self, relief=None, image=nextImageList, pos=(0.65, 0, -0.535), scale=0.3, command=self.nextFrame, extraArgs=[True])
return
def loadItemsToList(self, entryText):
if hasattr(self, 'lobbies') and self.lobbies:
self.lobbies.destroy()
self.lobbies = BossLobbyGui.SecondaryFrame.LobbyList(self)
toAdd = []
for i in self.items:
if i.lower().startswith(entryText.lower()):
toAdd.append(i)
for i in sorted(toAdd):
newItem = BossLobbyGui.SecondaryFrame.LobbyListItem(self, i, self.setSelection)
self.lobbies.addItem(newItem)
def setSelection(self, selection):
self.selection = selection
def getSelection(self):
return self.selection
def toggleFriendsOnly(self):
if self.friendsOnly:
self.friendsOnly = False
self.friendCheckbox.setColor(1, 0, 0, 1)
else:
self.friendsOnly = True
self.friendCheckbox.setColor(0, 1, 0, 1)
def getFriendsOnly(self):
return self.friendsOnly
def toggleLaffLimit(self):
if self.laffLimit:
self.laffLimit = False
self.laffCheckbox.setColor(1, 0, 0, 1)
else:
self.laffLimit = True
self.laffCheckbox.setColor(0, 1, 0, 1)
def getLaffLimit(self):
return self.laffLimit
def setLobbyName(self, name):
self.isCreating = bool(name)
self.lobbyName = name
def getLobbyName(self):
return self.lobbyName
def nextFrame(self, status):
if status:
if self.getSelection():
options = {'selected': self.getSelection()}
self.callback(self.frame + 1, options)
elif self.isCreating:
options = {'name': self.lobbyName,
'friends': self.getFriendsOnly(),
'laff': self.getLaffLimit()}
self.callback(self.frame + 1, options)
else:
self.callback(-1)
else:
self.callback(-1)
def __init__(self, callback, av):
DirectFrame.__init__(self)
self.callback = callback
self.avatar = av
self.frame = None
return
def destroy(self):
if hasattr(self, 'frame') and self.frame:
self.frame.destroy()
del self.frame
DirectFrame.destroy(self)
def loadFrame(self, frameNum, args = {}):
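        # frameNum: -1 cancels, 0 shows the public/private chooser, 1 acts on the chosen lobby
        # type (public joins immediately, private opens the browser), 2 finalizes joining or
        # creating a lobby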
if hasattr(self, 'frame') and self.frame:
self.frame.destroy()
if frameNum == -1:
self.callback(self.avatar, False)
elif frameNum == 0:
self.frame = BossLobbyGui.InitialFrame(self, self.loadFrame)
elif frameNum == 1 and args.get('lobbyType') is not None:
lobby = args.get('lobbyType')
if lobby == 0:
self.callback(self.avatar, True)
elif lobby == 1:
self.frame = BossLobbyGui.SecondaryFrame(self, self.loadFrame)
elif frameNum == 2:
selection = args.get('selected')
name = args.get('name')
if selection:
self.callback(self.avatar, True)
elif name:
friendsOnly = args.get('friends')
laffLimit = args.get('laff')
self.callback(self.avatar, True)
else:
self.callback(self.avatar, False)
return
GUI_EDITOR = "\nfrom toontown.coghq.BossLobbyGui import BossLobbyGui\n\ntest = BossLobbyGui(None, None)\ntest.loadFrame(1, {'lobbyType': 1})\n" | DedMemez/ODS-August-2017 | coghq/BossLobbyGui.py | Python | apache-2.0 | 16,583 |
VERSION = "0.3.0" | mattjmorrison/pyowa-9-2011 | mattspackage/__init__.py | Python | mit | 17 |
import numpy as np;
np.set_printoptions(linewidth=40, precision=5, suppress=True)
import pandas as pd; pd.options.display.max_rows=80;pd.options.display.expand_frame_repr=False;pd.options.display.max_columns=20
import pylab as plt;
import os; home=os.path.expanduser('~') +'/'
import sys;sys.path.insert(1,'/home/arya/workspace/bio/')
from CLEAR.Libs.Markov import Markov
import Utils.Util as utl
import Utils.Simulation as Simulation
import matplotlib as mpl
import seaborn as sns
import Utils.Plots as pplt
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 56});
mpl.rc('text', usetex=True)
sns.set_style("whitegrid", {"grid.color": "1", 'axes.linewidth': .5, "grid.linewidth": ".09"})
subptitle = list('ABCDEFGHI')
def createSelectionSimulations(s=0.1,maxGen=100):
def runSim(i):
try:
sim = Simulation.Simulation(maxGeneration=maxGen, generationStep=1, s=s, foldInitialAFs=False,
ModelName='Markov', L=1000, numReplicates=1,
makeSureSelectedSiteDontGetLost=False, experimentID=0)
x=sim.X[:,sim.siteUnderSelection,0]
except:
x=np.zeros(sim.X.shape[0])
x[:]=None
if not i%1000: print s, i
return x
X=map(runSim,range(10000))
a=pd.DataFrame(X)
a.to_pickle(utl.outpath + 'markov/T{}.S{:03.0f}.obs.df'.format(maxGen, s * 1000))
print 'done!'
def plotNull(subp, nu0=0.005, fontsize=5):
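    # compares the Markov-chain transition density with a Brownian-motion approximation and
    # simulated neutral observations at tau = 1, 10 and 100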
obs = pd.read_pickle(utl.outpath + 'markov/neutral.obs.{}.pkl'.format(nu0))
T = Markov.computeTransition(0, N=1000)
dfplt = pd.concat([pd.Series({'scale': 10, 'xlim': [0.0, 0.01], 'ylim': [0, 1]}, name=(0.005, 1)),
pd.Series({'scale': 30, 'xlim': [0.06, 0.14], 'ylim': [0, 0.15]}, name=(0.1, 1)),
pd.Series({'scale': 30, 'xlim': [0.0, 0.015], 'ylim': [0, 0.3]}, name=(0.005, 10)),
pd.Series({'scale': 45, 'xlim': [0.0, 0.2], 'ylim': [0, 0.025]}, name=(0.1, 10)),
pd.Series({'scale':30, 'xlim':[0.0,0.03],'ylim': [0,0.2]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
xx=np.arange(0,1,0.00001)
N=200; tau=1;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx);
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.subplot(3, 3, subp[0]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[1].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[1].flatten(),bins=500,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*4)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0)$')
tau=10
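    # advance the chain from tau=1 to tau=10 by applying the one-generation transition matrix nine more times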
for _ in range(9):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1]);
brownian.plot(color='r');
markov.plot(color='b');
o=pd.Series(obs.X[10].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[10].flatten(),bins=100,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*20)
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
tau=100
for _ in range(90):
markov=markov.dot(T)
N=200;h=2*nu0*(1-nu0);sig2=h*tau/N;brownian=stats.norm(nu0, sig2).pdf(xx)
brownian=pd.Series(brownian,index=xx);brownian/=brownian.sum();brownian.name='Brownian Motion';
brownian*=dfplt.loc[(nu0,tau)].scale
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2]);
brownian.plot(color='r');
markov.plot(color='b')
o=pd.Series(obs.X[100].flatten()).value_counts().sort_index();o=o/o.sum();
if nu0==0.1:
counts,limits=np.histogram(obs.X[100].flatten(),bins=30,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/(obs.X.shape[1]*obs.X.shape[2]*60)
o.name = 'Observation';
o.plot(color='g')
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s=0$, $\nu_0=${}, $\tau$={}'.format(nu0, tau), loc=1, fontsize=fontsize)
if subp[2] == 3:
plt.legend(loc='center right', fontsize=fontsize)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[2] - 1]), fontsize=fontsize)
def plotAlternative(subp, s=0.1, fontsize=5):
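    # same comparison as plotNull, but under selection (s > 0), plotting the Markov-chain
    # prediction against the selection simulations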
nu0=0.005
obs = pd.read_pickle(utl.outpath + 'markov/T100.S{:03.0f}.obs.df'.format(s * 1000))
T = Markov.computeTransition(s, 1000)
dfplt= pd.concat([pd.Series({'scale':10, 'xlim':[0.0,0.01],'ylim': [0,0.2]},name=(0.005,1)),pd.Series({'scale':30, 'xlim':[0.06,0.14],'ylim': [0,0.15]},name=(0.1,1)),
pd.Series({'scale':30, 'xlim':[0.0,0.015],'ylim': [0,0.15]},name=(0.005,10)),pd.Series({'scale':45, 'xlim':[0.0,0.2],'ylim': [0,0.025]},name=(0.1,10)),
pd.Series({'scale':30, 'xlim':[0.0,1],'ylim': [0,0.01]},name=(0.005,100)),pd.Series({'scale':50, 'xlim':[0.00,0.4],'ylim': [0,0.004]},name=(0.1,100))
],axis=1).T
markov=T.loc[nu0].copy(True);markov.name='Markov Chain'
plt.subplot(3, 3, subp[0])
tau=1
o=(obs[1].value_counts().sort_index()/obs.shape[0])
o.loc[0.0055]=0.1211
o.index=o.index-0.0005/2
markov.plot(color='b');
o.plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.ylabel(r'$P(\nu_\tau|\nu_0,s)$')
plt.xlabel('$s$')
tau=10
for _ in range(9):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[0] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[1])
markov.plot(color='b');
(obs[10].value_counts().sort_index() / obs.shape[0]).plot(color='g');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
tau=100
for _ in range(90):
markov=markov.dot(T)
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[1] - 1]), fontsize=fontsize)
plt.subplot(3, 3, subp[2])
counts,limits=np.histogram(obs[100].values,bins=50,range=[0,1]);centers = 0.5*(limits[1:]+limits[:-1]);o=pd.Series(counts,index=centers);o=o/obs.shape[0]
o/=35
o.loc[0.0] = o.iloc[0]
o = o.sort_index()
o.iloc[1] = o.iloc[2]
# o=(obs[100].value_counts().sort_index()/obs.shape[0])
o.name = 'Observation';
o.plot(color='g');
markov.plot(color='b');
plt.xlim(dfplt.loc[(nu0, tau)].xlim);
plt.ylim(dfplt.loc[(nu0, tau)].ylim);
plt.locator_params(nbins=3)
pplt.annotate(r'$s={}$, $\nu_0=${}, $\tau$={}'.format(s, nu0, tau), loc=1, fontsize=fontsize)
plt.xlabel('$s$')
pplt.setSize(plt.gca(), fontsize=fontsize)
plt.title('({})'.format(subptitle[subp[2] - 1]), fontsize=fontsize)
if __name__ == '__main__':
# createNeutralSimulations()
# createSelectionSimulations(s=0.01)
# createSelectionSimulations(s=0.1)
reload(pplt)
dpi = 200;
fig = plt.figure(figsize=(6.2, 4), dpi=dpi);
pplt.setStyle(lw=1);
fontsize = 7
plotNull(range(1, 4), fontsize=fontsize);
plotNull(range(4, 7), 0.1, fontsize=fontsize);
plotAlternative(range(7, 10), fontsize=fontsize);
plt.tight_layout()
pplt.savefig('markovDists', dpi=dpi);
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
print 'Done'
| airanmehr/bio | Scripts/TimeSeriesPaper/Plot/Markov.py | Python | mit | 8,854 |
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = [ "useradmin" ]
NICE_NAME = "User Admin"
REQUIRES_HADOOP = False
ICON = "useradmin/art/icon_useradmin_48.png"
MENU_INDEX = 60
PERMISSION_ACTIONS = (
("access_view:useradmin:edit_user", "Access to profile page on User Admin"),
)
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/useradmin/src/useradmin/settings.py | Python | gpl-2.0 | 1,015 |
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import logging
from StatusCheck import StatusCheck
import AmbariConfig
from StackVersionsFileHandler import StackVersionsFileHandler
from ActualConfigHandler import ActualConfigHandler
logger = logging.getLogger()
class LiveStatus:
SERVICES = [
"HDFS", "MAPREDUCE", "GANGLIA", "HBASE",
"NAGIOS", "ZOOKEEPER", "OOZIE", "HCATALOG",
"KERBEROS", "TEMPLETON", "HIVE", "WEBHCAT",
"YARN", "MAPREDUCE2", "FLUME"
]
COMPONENTS = [
{"serviceName" : "HDFS",
"componentName" : "DATANODE"},
{"serviceName" : "HDFS",
"componentName" : "NAMENODE"},
{"serviceName" : "HDFS",
"componentName" : "SECONDARY_NAMENODE"},
{"serviceName" : "HDFS",
"componentName" : "JOURNALNODE"},
{"serviceName" : "HDFS",
"componentName" : "ZKFC"},
{"serviceName" : "MAPREDUCE",
"componentName" : "JOBTRACKER"},
{"serviceName" : "MAPREDUCE",
"componentName" : "TASKTRACKER"},
{"serviceName" : "GANGLIA",
"componentName" : "GANGLIA_SERVER"},
{"serviceName" : "GANGLIA",
"componentName" : "GANGLIA_MONITOR"},
{"serviceName" : "HBASE",
"componentName" : "HBASE_MASTER"},
{"serviceName" : "HBASE",
"componentName" : "HBASE_REGIONSERVER"},
{"serviceName" : "NAGIOS",
"componentName" : "NAGIOS_SERVER"},
{"serviceName" : "FLUME",
"componentName" : "FLUME_SERVER"},
{"serviceName" : "ZOOKEEPER",
"componentName" : "ZOOKEEPER_SERVER"},
{"serviceName" : "OOZIE",
"componentName" : "OOZIE_SERVER"},
{"serviceName" : "HCATALOG",
"componentName" : "HCATALOG_SERVER"},
{"serviceName" : "KERBEROS",
"componentName" : "KERBEROS_SERVER"},
{"serviceName" : "HIVE",
"componentName" : "HIVE_SERVER"},
{"serviceName" : "HIVE",
"componentName" : "HIVE_METASTORE"},
{"serviceName" : "HIVE",
"componentName" : "MYSQL_SERVER"},
{"serviceName" : "WEBHCAT",
"componentName" : "WEBHCAT_SERVER"},
{"serviceName" : "YARN",
"componentName" : "RESOURCEMANAGER"},
{"serviceName" : "YARN",
"componentName" : "NODEMANAGER"},
{"serviceName" : "MAPREDUCE2",
"componentName" : "HISTORYSERVER"},
]
LIVE_STATUS = "STARTED"
DEAD_STATUS = "INSTALLED"
def __init__(self, cluster, service, component, globalConfig, config):
self.cluster = cluster
self.service = service
self.component = component
self.globalConfig = globalConfig
versionsFileDir = config.get('agent', 'prefix')
self.versionsHandler = StackVersionsFileHandler(versionsFileDir)
self.actualConfigHandler = ActualConfigHandler(config)
def belongsToService(self, component):
#TODO: Should also check belonging of server to cluster
return component['serviceName'] == self.service
# Live status was stripped from heartbeat after revision e1718dd
def build(self):
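    # map the component's pid-based status check onto STARTED/INSTALLED and attach the
    # stack version and active configuration tags for the heartbeat response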
statusCheck = StatusCheck(AmbariConfig.servicesToPidNames,
AmbariConfig.pidPathesVars, self.globalConfig,
AmbariConfig.servicesToLinuxUser)
livestatus = None
for component in self.COMPONENTS:
if component["serviceName"] == self.service and component["componentName"] == self.component:
serviceStatus = statusCheck.getStatus(component["componentName"])
if serviceStatus is None:
logger.warn("There is no service to pid mapping for " + component["componentName"])
status = self.LIVE_STATUS if serviceStatus else self.DEAD_STATUS
livestatus ={"componentName" : component["componentName"],
"msg" : "",
"status" : status,
"clusterName" : self.cluster,
"serviceName" : self.service,
"stackVersion": self.versionsHandler.
read_stack_version(component["componentName"])
}
active_config = self.actualConfigHandler.read_actual_component(component['componentName'])
if not active_config is None:
livestatus['configurationTags'] = active_config
break
logger.debug("The live status for component " + str(self.component) +\
" of service " + str(self.service) + " is " + str(livestatus))
return livestatus
def main(argv=None):
  # ad-hoc smoke test; note that constructing a LiveStatus also requires component, globalConfig and config arguments
  for service in LiveStatus.SERVICES:
    livestatus = LiveStatus('', service)
print json.dumps(livestatus.build())
if __name__ == '__main__':
main()
| telefonicaid/fiware-cosmos-ambari | ambari-agent/src/main/python/ambari_agent/LiveStatus.py | Python | apache-2.0 | 5,367 |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 15:34:18 2012
Author: Josef Perktold
"""
from statsmodels.compatnp.py3k import BytesIO, asbytes
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from statsmodels.stats.libqsturng import qsturng
ss = '''\
43.9 1 1
39.0 1 2
46.7 1 3
43.8 1 4
44.2 1 5
47.7 1 6
43.6 1 7
38.9 1 8
43.6 1 9
40.0 1 10
89.8 2 1
87.1 2 2
92.7 2 3
90.6 2 4
87.7 2 5
92.4 2 6
86.1 2 7
88.1 2 8
90.8 2 9
89.1 2 10
68.4 3 1
69.3 3 2
68.5 3 3
66.4 3 4
70.0 3 5
68.1 3 6
70.6 3 7
65.2 3 8
63.8 3 9
69.2 3 10
36.2 4 1
45.2 4 2
40.7 4 3
40.5 4 4
39.3 4 5
40.3 4 6
43.2 4 7
38.7 4 8
40.9 4 9
39.7 4 10'''
#idx Treatment StressReduction
ss2 = '''\
1 mental 2
2 mental 2
3 mental 3
4 mental 4
5 mental 4
6 mental 5
7 mental 3
8 mental 4
9 mental 4
10 mental 4
11 physical 4
12 physical 4
13 physical 3
14 physical 5
15 physical 4
16 physical 1
17 physical 1
18 physical 2
19 physical 3
20 physical 3
21 medical 1
22 medical 2
23 medical 2
24 medical 2
25 medical 3
26 medical 2
27 medical 3
28 medical 1
29 medical 3
30 medical 1'''
ss3 = '''\
1 24.5
1 23.5
1 26.4
1 27.1
1 29.9
2 28.4
2 34.2
2 29.5
2 32.2
2 30.1
3 26.1
3 28.3
3 24.3
3 26.2
3 27.8'''
ss5 = '''\
2 - 3\t4.340\t0.691\t7.989\t***
2 - 1\t4.600\t0.951\t8.249\t***
3 - 2\t-4.340\t-7.989\t-0.691\t***
3 - 1\t0.260\t-3.389\t3.909\t-
1 - 2\t-4.600\t-8.249\t-0.951\t***
1 - 3\t-0.260\t-3.909\t3.389\t'''
cylinders = np.array([8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 6, 6, 6, 4, 4,
4, 4, 4, 4, 6, 8, 8, 8, 8, 4, 4, 4, 4, 8, 8, 8, 8, 6, 6, 6, 6, 4, 4, 4, 4, 6, 6,
6, 6, 4, 4, 4, 4, 4, 8, 4, 6, 6, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 6, 6, 4, 6, 4, 4, 4, 4, 4, 4, 4, 4])
cyl_labels = np.array(['USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'France',
'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Japan',
'Germany', 'France', 'Germany', 'Sweden', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'USA', 'USA', 'France', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'Germany',
'Japan', 'USA', 'USA', 'USA', 'USA', 'Germany', 'Japan', 'Japan', 'USA', 'Sweden', 'USA', 'France',
'Japan', 'Germany', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA', 'USA',
'Germany', 'Japan', 'Japan', 'USA', 'USA', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'Japan', 'USA',
'USA', 'USA', 'USA', 'Japan', 'USA', 'USA', 'USA', 'Germany', 'USA', 'USA', 'USA'])
#accommodate recfromtxt for python 3.2, requires bytes
ss = asbytes(ss)
ss2 = asbytes(ss2)
ss3 = asbytes(ss3)
ss5 = asbytes(ss5)
dta = np.recfromtxt(BytesIO(ss), names=("Rust","Brand","Replication"))
dta2 = np.recfromtxt(BytesIO(ss2), names = ("idx", "Treatment", "StressReduction"))
dta3 = np.recfromtxt(BytesIO(ss3), names = ("Brand", "Relief"))
dta5 = np.recfromtxt(BytesIO(ss5), names = ('pair', 'mean', 'lower', 'upper', 'sig'), delimiter='\t')
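# reorder the SAS rows into the (2-1, 3-1, 3-2) pair ordering produced by MultiComparison.tukeyhsd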
sas_ = dta5[[1,3,2]]
from statsmodels.stats.multicomp import (tukeyhsd, pairwise_tukeyhsd,
MultiComparison)
#import statsmodels.sandbox.stats.multicomp as multi
#print tukeyhsd(dta['Brand'], dta['Rust'])
def get_thsd(mci, alpha=0.05):
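    # recompute the Tukey HSD confidence intervals directly from the group statistics as an
    # independent cross-check on MultiComparison.tukeyhsd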
var_ = np.var(mci.groupstats.groupdemean(), ddof=len(mci.groupsunique))
means = mci.groupstats.groupmean
nobs = mci.groupstats.groupnobs
resi = tukeyhsd(means, nobs, var_, df=None, alpha=alpha,
q_crit=qsturng(1-alpha, len(means), (nobs-1).sum()))
#print resi[4]
var2 = (mci.groupstats.groupvarwithin() * (nobs - 1.)).sum() \
/ (nobs - 1.).sum()
#print nobs, (nobs - 1).sum()
#print mci.groupstats.groupvarwithin()
assert_almost_equal(var_, var2, decimal=14)
return resi
class CheckTuckeyHSDMixin(object):
@classmethod
def setup_class_(self):
self.mc = MultiComparison(self.endog, self.groups)
self.res = self.mc.tukeyhsd(alpha=self.alpha)
def test_multicomptukey(self):
assert_almost_equal(self.res.meandiffs, self.meandiff2, decimal=14)
assert_almost_equal(self.res.confint, self.confint2, decimal=2)
assert_equal(self.res.reject, self.reject2)
def test_group_tukey(self):
res_t = get_thsd(self.mc, alpha=self.alpha)
assert_almost_equal(res_t[4], self.confint2, decimal=2)
def test_shortcut_function(self):
#check wrapper function
res = pairwise_tukeyhsd(self.endog, self.groups, alpha=self.alpha)
assert_almost_equal(res.confint, self.res.confint, decimal=14)
class TestTuckeyHSD2(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#balanced case
self.endog = dta2['StressReduction']
self.groups = dta2['Treatment']
self.alpha = 0.05
self.setup_class_() #in super
#from R
tukeyhsd2s = np.array([ 1.5,1,-0.5,0.3214915,
-0.1785085,-1.678509,2.678509,2.178509,
0.6785085,0.01056279,0.1079035,0.5513904]
).reshape(3,4, order='F')
self.meandiff2 = tukeyhsd2s[:, 0]
self.confint2 = tukeyhsd2s[:, 1:3]
pvals = tukeyhsd2s[:, 3]
self.reject2 = pvals < 0.05
class TestTuckeyHSD2s(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#unbalanced case
self.endog = dta2['StressReduction'][3:29]
self.groups = dta2['Treatment'][3:29]
self.alpha = 0.01
self.setup_class_()
#from R
tukeyhsd2s = np.array(
[1.8888888888888889, 0.888888888888889, -1, 0.2658549,
-0.5908785, -2.587133, 3.511923, 2.368656,
0.5871331, 0.002837638, 0.150456, 0.1266072]
).reshape(3,4, order='F')
self.meandiff2 = tukeyhsd2s[:, 0]
self.confint2 = tukeyhsd2s[:, 1:3]
pvals = tukeyhsd2s[:, 3]
self.reject2 = pvals < 0.01
class TestTuckeyHSD3(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#SAS case
self.endog = dta3['Relief']
self.groups = dta3['Brand']
self.alpha = 0.05
self.setup_class_()
#super(self, self).setup_class_()
#CheckTuckeyHSD.setup_class_()
self.meandiff2 = sas_['mean']
self.confint2 = sas_[['lower','upper']].view(float).reshape((3,2))
self.reject2 = sas_['sig'] == asbytes('***')
class TestTuckeyHSD4(CheckTuckeyHSDMixin):
@classmethod
def setup_class(self):
#unbalanced case verified in Matlab
self.endog = cylinders
self.groups = cyl_labels
self.alpha = 0.05
self.setup_class_()
self.res._simultaneous_ci()
#from Matlab
self.halfwidth2 = np.array([1.5228335685980883, 0.9794949704444682, 0.78673802805533644,
2.3321237694566364, 0.57355135882752939])
self.meandiff2 = np.array([0.22222222222222232, 0.13333333333333375, 0.0, 2.2898550724637685,
-0.088888888888888573, -0.22222222222222232, 2.0676328502415462,
-0.13333333333333375, 2.1565217391304348, 2.2898550724637685])
self.confint2 = np.array([-2.32022210717, 2.76466655161, -2.247517583, 2.51418424967,
-3.66405224956, 3.66405224956, 0.113960166573, 4.46574997835,
-1.87278583908, 1.6950080613, -3.529655688, 3.08521124356, 0.568180988881,
3.5670847116, -3.31822643175, 3.05155976508, 0.951206924521, 3.36183655374,
-0.74487911754, 5.32458926247]).reshape(10,2)
self.reject2 = np.array([False, False, False, True, False, False, True, False, True, False])
def test_hochberg_intervals(self):
assert_almost_equal(self.res.halfwidths, self.halfwidth2, 14)
| bavardage/statsmodels | statsmodels/stats/tests/test_pairwise.py | Python | bsd-3-clause | 8,751 |
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for MSFT OCS REST API client
"""
import mock
import requests
from requests import exceptions as requests_exceptions
from ironic.common import exception
from ironic.drivers.modules.msftocs import msftocsclient
from ironic.tests import base
FAKE_BOOT_RESPONSE = (
'<BootResponse xmlns="%s" '
'xmlns:i="http://www.w3.org/2001/XMLSchema-instance">'
'<completionCode>Success</completionCode>'
'<apiVersion>1</apiVersion>'
'<statusDescription>Success</statusDescription>'
'<bladeNumber>1</bladeNumber>'
'<nextBoot>ForcePxe</nextBoot>'
'</BootResponse>') % msftocsclient.WCSNS
FAKE_BLADE_RESPONSE = (
'<BladeResponse xmlns="%s" '
'xmlns:i="http://www.w3.org/2001/XMLSchema-instance">'
'<completionCode>Success</completionCode>'
'<apiVersion>1</apiVersion>'
'<statusDescription/>'
'<bladeNumber>1</bladeNumber>'
'</BladeResponse>') % msftocsclient.WCSNS
FAKE_POWER_STATE_RESPONSE = (
'<PowerStateResponse xmlns="%s" '
'xmlns:i="http://www.w3.org/2001/XMLSchema-instance">'
'<completionCode>Success</completionCode>'
'<apiVersion>1</apiVersion>'
'<statusDescription>Blade Power is On, firmware decompressed'
'</statusDescription>'
'<bladeNumber>1</bladeNumber>'
'<Decompression>0</Decompression>'
'<powerState>ON</powerState>'
'</PowerStateResponse>') % msftocsclient.WCSNS
FAKE_BLADE_STATE_RESPONSE = (
'<BladeStateResponse xmlns="%s" '
'xmlns:i="http://www.w3.org/2001/XMLSchema-instance">'
'<completionCode>Success</completionCode>'
'<apiVersion>1</apiVersion>'
'<statusDescription/>'
'<bladeNumber>1</bladeNumber>'
'<bladeState>ON</bladeState>'
'</BladeStateResponse>') % msftocsclient.WCSNS
class MSFTOCSClientApiTestCase(base.TestCase):
def setUp(self):
super(MSFTOCSClientApiTestCase, self).setUp()
self._fake_base_url = "http://fakehost:8000"
self._fake_username = "admin"
self._fake_password = 'fake'
self._fake_blade_id = 1
self._client = msftocsclient.MSFTOCSClientApi(
self._fake_base_url, self._fake_username, self._fake_password)
@mock.patch.object(requests, 'get', autospec=True)
def test__exec_cmd(self, mock_get):
fake_response_text = 'fake_response_text'
fake_rel_url = 'fake_rel_url'
mock_get.return_value.text = 'fake_response_text'
self.assertEqual(fake_response_text,
self._client._exec_cmd(fake_rel_url))
mock_get.assert_called_once_with(
self._fake_base_url + "/" + fake_rel_url, auth=mock.ANY)
@mock.patch.object(requests, 'get', autospec=True)
def test__exec_cmd_http_get_fail(self, mock_get):
fake_rel_url = 'fake_rel_url'
mock_get.side_effect = requests_exceptions.ConnectionError('x')
self.assertRaises(exception.MSFTOCSClientApiException,
self._client._exec_cmd,
fake_rel_url)
mock_get.assert_called_once_with(
self._fake_base_url + "/" + fake_rel_url, auth=mock.ANY)
def test__check_completion_code(self):
et = self._client._check_completion_code(FAKE_BOOT_RESPONSE)
self.assertEqual('{%s}BootResponse' % msftocsclient.WCSNS, et.tag)
def test__check_completion_code_fail(self):
self.assertRaises(exception.MSFTOCSClientApiException,
self._client._check_completion_code,
'<fake xmlns="%s"></fake>' % msftocsclient.WCSNS)
def test__check_completion_with_bad_completion_code_fail(self):
self.assertRaises(exception.MSFTOCSClientApiException,
self._client._check_completion_code,
'<fake xmlns="%s">'
'<completionCode>Fail</completionCode>'
'</fake>' % msftocsclient.WCSNS)
def test__check_completion_code_xml_parsing_fail(self):
self.assertRaises(exception.MSFTOCSClientApiException,
self._client._check_completion_code,
'bad_xml')
@mock.patch.object(
msftocsclient.MSFTOCSClientApi, '_exec_cmd', autospec=True)
def test_get_blade_state(self, mock_exec_cmd):
mock_exec_cmd.return_value = FAKE_BLADE_STATE_RESPONSE
self.assertEqual(
msftocsclient.POWER_STATUS_ON,
self._client.get_blade_state(self._fake_blade_id))
mock_exec_cmd.assert_called_once_with(
self._client, "GetBladeState?bladeId=%d" % self._fake_blade_id)
@mock.patch.object(
msftocsclient.MSFTOCSClientApi, '_exec_cmd', autospec=True)
def test_set_blade_on(self, mock_exec_cmd):
mock_exec_cmd.return_value = FAKE_BLADE_RESPONSE
self._client.set_blade_on(self._fake_blade_id)
mock_exec_cmd.assert_called_once_with(
self._client, "SetBladeOn?bladeId=%d" % self._fake_blade_id)
@mock.patch.object(
msftocsclient.MSFTOCSClientApi, '_exec_cmd', autospec=True)
def test_set_blade_off(self, mock_exec_cmd):
mock_exec_cmd.return_value = FAKE_BLADE_RESPONSE
self._client.set_blade_off(self._fake_blade_id)
mock_exec_cmd.assert_called_once_with(
self._client, "SetBladeOff?bladeId=%d" % self._fake_blade_id)
@mock.patch.object(
msftocsclient.MSFTOCSClientApi, '_exec_cmd', autospec=True)
def test_set_blade_power_cycle(self, mock_exec_cmd):
mock_exec_cmd.return_value = FAKE_BLADE_RESPONSE
self._client.set_blade_power_cycle(self._fake_blade_id)
mock_exec_cmd.assert_called_once_with(
self._client,
"SetBladeActivePowerCycle?bladeId=%d&offTime=0" %
self._fake_blade_id)
@mock.patch.object(
msftocsclient.MSFTOCSClientApi, '_exec_cmd', autospec=True)
def test_get_next_boot(self, mock_exec_cmd):
mock_exec_cmd.return_value = FAKE_BOOT_RESPONSE
self.assertEqual(
msftocsclient.BOOT_TYPE_FORCE_PXE,
self._client.get_next_boot(self._fake_blade_id))
mock_exec_cmd.assert_called_once_with(
self._client, "GetNextBoot?bladeId=%d" % self._fake_blade_id)
@mock.patch.object(
msftocsclient.MSFTOCSClientApi, '_exec_cmd', autospec=True)
def test_set_next_boot(self, mock_exec_cmd):
mock_exec_cmd.return_value = FAKE_BOOT_RESPONSE
self._client.set_next_boot(self._fake_blade_id,
msftocsclient.BOOT_TYPE_FORCE_PXE)
mock_exec_cmd.assert_called_once_with(
self._client,
"SetNextBoot?bladeId=%(blade_id)d&bootType=%(boot_type)d&"
"uefi=%(uefi)s&persistent=%(persistent)s" %
{"blade_id": self._fake_blade_id,
"boot_type": msftocsclient.BOOT_TYPE_FORCE_PXE,
"uefi": "true", "persistent": "true"})
| bacaldwell/ironic | ironic/tests/unit/drivers/modules/msftocs/test_msftocsclient.py | Python | apache-2.0 | 7,556 |
from . import util
from .image_class import ImageClass
from astropy.io import fits
import numpy as np
import logging
# clobber keyword is deprecated in astropy 1.3
from astropy import __version__
if __version__ < '1.3':
overwrite = {'clobber': True}
else:
overwrite = {'overwrite': True}
def calculate_difference_image(science, reference, gain_ratio=np.inf, gain_mask=None, sigma_cut=5., use_pixels=False,
show=False, percent=99, use_mask_for_gain=True, max_iterations=5, size_cut=True,
pixstack_limit=None):
"""
Calculate the difference image using the Zackay algorithm.
This is the main function that calculates the difference image using the
    Zackay, Ofek, Gal-Yam 2016 algorithm. It operates on ImageClass objects defined in
image_class.py. The function will fit the gain ratio if not provided.
Ultimately this calculates equation 13 in Zackay, Ofek, Gal-Yam 2016.
Parameters
----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
gain_ratio : float, optional
        Ratio of the gains or flux based zero points of the two images.
gain_mask : str or numpy.ndarray, optional
Array or FITS file holding an array of pixels to use when fitting
the gain ratio.
sigma_cut : float, optional
Threshold (in standard deviations) to extract a star from the image (`thresh` in `sep.extract`).
use_pixels : bool, optional
Fit the gain ratio using pixels (True) or stars (False) in image.
show : bool, optional
        Display debugging plots during fitting.
percent : float, optional
Percentile cutoff to use for fitting the gain ratio.
use_mask_for_gain : bool, optional
Set to False in order to ignore the input masks when calculating the gain ratio.
max_iterations : int, optional
Maximum number of iterations to reconvolve the images for gain matching.
    size_cut : bool, optional
Ignore unusually large/small sources for gain matching (assumes most sources are real).
pixstack_limit : int, optional
Number of active object pixels in Sep, set with sep.set_extract_pixstack
Returns
-------
difference_image : numpy.ndarray
The difference between science and reference images.
"""
# match the gains
if gain_ratio == np.inf:
if gain_mask is not None:
if type(gain_mask) == str:
gain_mask_data = fits.getdata(gain_mask)
else:
gain_mask_data = gain_mask
science.mask[gain_mask_data == 1] = 1
reference.mask[gain_mask_data == 1] = 1
science.zero_point = util.solve_iteratively(science, reference, sigma_cut=sigma_cut, use_pixels=use_pixels,
show=show, percent=percent, use_mask=use_mask_for_gain,
max_iterations=max_iterations, size_cut=size_cut,
pixstack_limit=pixstack_limit)
else:
science.zero_point = gain_ratio
# create required arrays
science_image = science
reference_image = reference
science_psf = science.psf
reference_psf = reference.psf
# do fourier transforms (fft)
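    # the PSF arrays are assumed to have been resized and centred to the full image shape
    # upstream (in ImageClass), so image and PSF FFTs can be multiplied elementwise below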
science_image_fft = np.fft.fft2(science_image)
reference_image_fft = np.fft.fft2(reference_image)
science_psf_fft = np.fft.fft2(science_psf)
reference_psf_fft = np.fft.fft2(reference_psf)
# calculate difference image
denominator = science.background_std ** 2 * reference.zero_point ** 2 * abs(reference_psf_fft) ** 2
denominator += reference.background_std ** 2 * science.zero_point ** 2 * abs(science_psf_fft) ** 2
difference_image_fft = science_image_fft * reference_psf_fft * reference.zero_point
difference_image_fft -= reference_image_fft * science_psf_fft * science.zero_point
difference_image_fft /= np.sqrt(denominator)
difference_image = np.fft.ifft2(difference_image_fft)
difference_image = np.real(difference_image)
return difference_image
def calculate_difference_image_zero_point(science, reference):
"""
Calculate the flux based zero point of the difference image.
Calculate the difference image flux based zero point using equation 15 of
Zackay, Ofek, Gal-Yam 2016.
Parameters
----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
Returns
-------
difference_image_zero_point : float
Flux based zero point of the difference image.
"""
denominator = science.background_std ** 2 * reference.zero_point ** 2
denominator += reference.background_std ** 2 * science.zero_point ** 2
difference_image_zero_point = science.zero_point * reference.zero_point / np.sqrt(denominator)
logging.info('Global difference image zero point is {}'.format(np.mean(difference_image_zero_point)))
return difference_image_zero_point
def calculate_difference_psf(science, reference, difference_image_zero_point):
"""
Calculate the PSF of the difference image.
    Calculates the PSF of the difference image using equation 17 of Zackay,
Ofek, Gal-Yam 2016.
Parameters
----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
difference_image_zero_point : float
Flux based zero point of the difference image.
Returns
-------
difference_psf : numpy.ndarray
PSF of the difference image.
"""
science_psf_fft = np.fft.fft2(science.psf)
reference_psf_fft = np.fft.fft2(reference.psf)
denominator = science.background_std ** 2 * reference.zero_point ** 2 * abs(reference_psf_fft) ** 2
denominator += reference.background_std ** 2 * science.zero_point ** 2 * abs(science_psf_fft) ** 2
difference_psf_fft = science.zero_point * science_psf_fft * reference_psf_fft
difference_psf_fft /= difference_image_zero_point * np.sqrt(denominator)
difference_psf = np.fft.ifft2(difference_psf_fft)
return difference_psf
def calculate_matched_filter_image(difference_image, difference_psf, difference_zero_point):
"""
Calculate the matched filter difference image.
Calculates the matched filter difference image described in Zackay, Ofek,
Gal-Yam 2016 defined in equation 16.
Parameters
----------
difference_image : numpy.ndarray
A difference image as calculated using calculate_difference_image.
difference_psf : numpy.ndarray
PSF for the difference image above.
difference_zero_point
Flux based zero point for the image above.
Returns
-------
matched_filter : numpy.ndarray
Matched filter image.
"""
matched_filter_fft = difference_zero_point * np.fft.fft2(difference_image) * np.conj(np.fft.fft2(difference_psf))
matched_filter = np.fft.ifft2(matched_filter_fft)
return matched_filter
def source_noise(image, kernel):
"""
Calculate source noise correction for matched filter image
Calculate the noise due to the sources in an image. The output is used by
noise corrected matched filter image. This is equation 26 in Zackay, Ofek,
Gal-Yam 2016.
Parameters
----------
image : PyZOGY.ImageClass
ImageClass instance with read_noise attribute defined.
kernel : numpy.ndarray
Convolution kernel for the noise image. This comes from the function
called noise_kernels.
Returns
-------
image_variance_corr : numpy.ndarray
Variance of the image due to source noise.
"""
if image.variance is None:
image.variance = np.copy(image.raw_image) + image.read_noise
image_variance_corr = np.fft.ifft2(np.fft.fft2(image.variance) * np.fft.fft2(kernel ** 2))
return image_variance_corr
def noise_kernels(science, reference):
"""
Calculate the convolution kernels used in the noise correction
The kernels calculated here are used in the convolution of the noise images
that are used in the noise corrected matched filter images. They are
    defined in equations 28 and 29 of Zackay, Ofek, Gal-Yam 2016.
    Parameters
    ----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
Returns
-------
science_kernel : numpy.ndarray
Kernel for the convolution of arrays derived from the science image.
reference_kernel : numpy.ndarray
Kernel for the convolution of arrays derived from the reference image.
"""
science_psf_fft = np.fft.fft2(science.psf)
reference_psf_fft = np.fft.fft2(reference.psf)
denominator = reference.background_std ** 2 * science.zero_point ** 2 * abs(science_psf_fft) ** 2
denominator += science.background_std ** 2 * reference.zero_point ** 2 * abs(reference_psf_fft) ** 2
science_kernel_fft = science.zero_point * reference.zero_point ** 2
science_kernel_fft *= np.conj(reference_psf_fft) * abs(science_psf_fft) ** 2
science_kernel_fft /= denominator
science_kernel = np.fft.ifft2(science_kernel_fft)
reference_kernel_fft = reference.zero_point * science.zero_point ** 2
reference_kernel_fft *= np.conj(science_psf_fft) * abs(reference_psf_fft) ** 2
reference_kernel_fft /= denominator
reference_kernel = np.fft.ifft2(reference_kernel_fft)
return science_kernel, reference_kernel
def registration_noise(image, kernel):
"""
Calculate the registration noise for the noise correction
Calculates the astrometric registration noise image. This noise image is
used in the calculation of the noise corrected matched filter image.
Parameters
----------
image : PyZOGY.ImageClass
ImageClass instance with registration_noise attribute defined.
kernel : numpy.ndarray
Convolution kernel for the noise image. This comes from the function
called noise_kernels.
Returns
-------
reg_variance : numpy.ndarray
Noise image due to uncertainty in the image registration.
"""
matched_part = np.fft.ifft2(np.fft.fft2(image) * np.fft.fft2(kernel))
gradient = np.gradient(matched_part)
# registration_noise is (x, y), gradient is (row, col)
reg_variance = image.registration_noise[1] ** 2 * gradient[0] ** 2
reg_variance += image.registration_noise[0] ** 2 * gradient[1] ** 2
return reg_variance
def correct_matched_filter_image(science, reference):
"""
    Calculate the noise used to correct the matched filter image
Computes the total noise used for the noise corrected matched filter image
as defined in equation 25 of Zackay, Ofek, Gal-Yam 2016. This will work
with the default read_noise and registration_noise, but it may not give
a meaningful result.
Parameters
----------
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
Returns
-------
noise : numpy.ndarray
The total noise in the matched filter image.
"""
science_kernel, reference_kernel = noise_kernels(science, reference)
science_source_noise = source_noise(science, science_kernel)
reference_source_noise = source_noise(reference, reference_kernel)
science_registration_noise = registration_noise(science, science_kernel)
reference_registration_noise = registration_noise(reference, reference_kernel)
noise = science_source_noise + reference_source_noise + science_registration_noise + reference_registration_noise
return noise
def photometric_matched_filter_image(science, reference, matched_filter):
"""
Calculate the photometry on the matched filter image
"""
# note this may do exactly what another function above does
# check this out later.
science_psf_fft = np.fft.fft2(science.psf)
reference_psf_fft = np.fft.fft2(reference.psf)
zero_point = science.zero_point ** 2 * reference.zero_point ** 2
zero_point *= abs(science_psf_fft) ** 2 * abs(reference_psf_fft) ** 2
denominator = reference.background_std ** 2 * science.zero_point ** 2 * abs(science_psf_fft) ** 2
denominator += science.background_std ** 2 * reference.zero_point ** 2 * abs(reference_psf_fft) ** 2
zero_point /= denominator
photometric_matched_filter = matched_filter / np.sum(zero_point)
return photometric_matched_filter
def normalize_difference_image(difference, difference_image_zero_point, science, reference, normalization='reference'):
"""
Normalize to user's choice of image
Normalizes the difference image into the photometric system of the science
image, reference image, or leave un-normalized.
Parameters
----------
difference : numpy.ndarray
Difference image as calculated by calculate_difference_image.
difference_image_zero_point : float
Flux based zero point of the difference image above.
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
reference : PyZOGY.ImageClass
ImageClass instance created from the reference image.
normalization : str, optional
Normalization choice. Options are 'reference', 'science', or 'none'.
Returns
-------
difference_image : numpy.ndarray
Normalized difference image.
"""
if normalization == 'reference' or normalization == 't':
difference_image = difference * reference.zero_point / difference_image_zero_point
elif normalization == 'science' or normalization == 'i':
difference_image = difference * science.zero_point / difference_image_zero_point
else:
difference_image = difference
logging.info('Difference normalized to {}'.format(normalization))
return difference_image
def run_subtraction(science_image, reference_image, science_psf, reference_psf, output=None,
science_mask=None, reference_mask=None, n_stamps=1, normalization='reference',
science_saturation=np.inf, reference_saturation=np.inf, science_variance=None,
reference_variance=None, matched_filter=False, photometry=False,
gain_ratio=np.inf, gain_mask=None, use_pixels=False, sigma_cut=5., show=False, percent=99,
corrected=False, use_mask_for_gain=True, max_iterations=5, size_cut=False, pixstack_limit=None):
"""
Run full subtraction given filenames and parameters
Main function for users who don't want to use the ImageClass. This function
lets the user put in all the arguments by hand and then creates the
ImageClass instances.
Parameters
----------
science_image : numpy.ndarray
Science image to compare to reference image.
reference_image : numpy.ndarray
Reference image to subtract from science.
science_psf : numpy.ndarray
PSF of the science image.
reference_psf : numpy.ndarray
PSF of the reference image.
output : str, optional
If provided, save the difference image to a FITS file with this file name (and its PSF to *.psf.fits).
science_mask : str, optional
Name of the FITS file holding the science image mask.
reference_mask : str, optional
Name of the FITS file holding the reference image mask.
n_stamps : int, optional
Number of stamps to use while fitting background.
normalization : str, optional
Normalize difference image to 'reference', 'science', or 'none'.
science_saturation : float, optional
Maximum usable pixel value in science image.
reference_saturation : float, optional
Maximum usable pixel value in reference image.
science_variance : numpy.ndarray or float, optional
Variance of the science image
reference_variance : numpy.ndarray or float, optional
Variance of the reference image.
matched_filter : bool, optional
Calculate the matched filter image.
photometry : bool, optional
Photometrically normalize the matched filter image.
gain_ratio : float, optional
Ratio between the flux based zero points of the images.
gain_mask : numpy.ndarray or str, optional
Array or FITS image of pixels to use in gain matching.
use_pixels : bool, optional
Use pixels (True) or stars (False) to match gains.
sigma_cut : float, optional
Threshold (in standard deviations) to extract a star from the image (`thresh` in `sep.extract`).
show : bool, optional
Show debugging plots.
percent : float, optional
Percentile cutoff for gain matching.
corrected : bool, optional
Noise correct matched filter image.
use_mask_for_gain : bool, optional
Set to False in order to ignore the input masks when calculating the gain ratio.
max_iterations : int, optional
Maximum number of iterations to reconvolve the images for gain matching.
    size_cut : bool, optional
Ignores unusually large/small sources for gain matching (assumes most sources are real).
pixstack_limit : int
Number of active object pixels in Sep, set with sep.set_extract_pixstack
Returns
-------
normalized_difference : numpy.ndarray
The normalized difference between science and reference images.
difference_psf : numpy.ndarray
The difference image PSF.
"""
science = ImageClass(science_image, science_psf, science_mask, n_stamps, science_saturation, science_variance)
reference = ImageClass(reference_image, reference_psf, reference_mask, n_stamps, reference_saturation,
reference_variance)
difference = calculate_difference_image(science, reference, gain_ratio, gain_mask, sigma_cut, use_pixels, show,
percent, use_mask_for_gain, max_iterations, size_cut, pixstack_limit)
difference_zero_point = calculate_difference_image_zero_point(science, reference)
difference_psf = calculate_difference_psf(science, reference, difference_zero_point)
normalized_difference = normalize_difference_image(difference, difference_zero_point, science, reference,
normalization)
if output:
save_difference_image_to_file(normalized_difference, science, normalization, output)
save_difference_psf_to_file(difference_psf, output.replace('.fits', '.psf.fits'))
if matched_filter:
matched_filter_image = calculate_matched_filter_image(difference, difference_psf, difference_zero_point)
if photometry and corrected:
logging.error('Photometric matched filter and noise corrected matched filter are incompatible')
if photometry:
matched_filter_image = photometric_matched_filter_image(science, reference, matched_filter_image)
elif corrected:
matched_filter_image /= np.sqrt(correct_matched_filter_image(science, reference))
fits.writeto(matched_filter, np.real(matched_filter_image), science.header, output_verify='warn', **overwrite)
logging.info('Wrote matched filter image to {}'.format(matched_filter))
return normalized_difference, difference_psf
def save_difference_image_to_file(difference_image, science, normalization, output):
"""
Save difference image to file.
Normalize and save difference image to file. This also copies over the
FITS header of the science image.
Parameters
----------
difference_image : numpy.ndarray
Difference image
science : PyZOGY.ImageClass
ImageClass instance created from the science image.
normalization : str
Normalize to 'reference', 'science', or 'none'.
output : str
File to save FITS image to.
"""
hdu = fits.PrimaryHDU(difference_image)
hdu.header = science.header.copy()
hdu.header['PHOTNORM'] = normalization
hdu.writeto(output, output_verify='warn', **overwrite)
logging.info('Wrote difference image to {}'.format(output))
def save_difference_psf_to_file(difference_psf, output):
"""
Save difference image psf to file.
Save the PSF of the difference image to a FITS file.
Parameters
----------
difference_psf : numpy.ndarray
PSF of the difference image.
output : str
File to save FITS image to.
"""
real_part = np.real(difference_psf)
center = np.array(real_part.shape) / 2
centered_psf = np.roll(real_part, center.astype(int), (0, 1))
fits.writeto(output, centered_psf, output_verify='warn', **overwrite)
logging.info('Wrote difference psf to {}'.format(output))
| dguevel/PyZOGY | PyZOGY/subtract.py | Python | mit | 21,240 |
# Copyright (c) 2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import logging
import time
from twisted.application import service
from twisted.internet import defer
from zope import interface
from piped import event, util, resource
logger = logging.getLogger(__name__)
class TickProvider(service.MultiService):
""" Provides tick-batons that are sent to processors at regular intervals.
Example configuration::
ticks:
interval:
any_name:
interval: 120
processor: processor_name
auto_start: true # if true, starts the interval when the application starts.
The above will create a :class:`TickInterval` that generates a baton every 120 seconds or
every time the previous tick baton finished processing, whichever takes the
longest.
.. seealso:: :mod:`piped.processors.tick_processors`.
"""
interface.classProvides(resource.IResourceProvider)
def __init__(self):
super(TickProvider, self).__init__()
self.on_start = event.Event()
self.on_pause = event.Event()
self._tick_intervals = dict()
def configure(self, runtime_environment):
self.runtime_environment = runtime_environment
self.setServiceParent(runtime_environment.application)
enabled = runtime_environment.get_configuration_value('ticks.enabled', True)
if not enabled:
return
for interval_name, interval_config in runtime_environment.get_configuration_value('ticks.interval', dict()).items():
if not interval_config.pop('enabled', True):
continue
tick_interval = TickInterval(interval_name, **interval_config)
tick_interval.configure(runtime_environment)
tick_interval.setServiceParent(self)
self._tick_intervals[interval_name] = tick_interval
runtime_environment.resource_manager.register('ticks.interval.%s'%interval_name, provider=self)
def add_consumer(self, resource_dependency):
tick, interval, interval_name = resource_dependency.provider.split('.')
tick_interval = self._tick_intervals[interval_name]
resource_dependency.on_resource_ready(tick_interval)
class TickInterval(service.Service):
_waiting_for_processor = None
_sleeping = None
def __init__(self, interval_name, interval, processor, auto_start=True):
self.name = interval_name
self.interval = interval
self.processor_dependency_config = dict(provider=processor) if isinstance(processor, basestring) else processor
self._can_start = auto_start
self._previous_tick = time.time()
def configure(self, runtime_environment):
self.dependencies = runtime_environment.create_dependency_map(self, processor=self.processor_dependency_config)
def start_ticking(self):
self._can_start = True
if not self.running:
self.startService()
def stop_ticking(self):
self._can_start = False
if self.running:
self.stopService()
def startService(self):
if not self._can_start:
# the user has explicitly disallowed us from starting
return
service.Service.startService(self)
if self._waiting_for_processor:
return
self.produce_ticks()
def stopService(self):
service.Service.stopService(self)
if self._sleeping:
self._sleeping.cancel()
def _create_baton(self):
tick = time.time()
delta = tick - self._previous_tick
baton = dict(interval=self.interval, previous_tick=self._previous_tick, delta=delta, tick=tick)
self._previous_tick = tick
return baton
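    # Illustrative baton produced by _create_baton (timestamps are hypothetical):
    #     {'interval': 120, 'previous_tick': 1500000000.0,
    #      'delta': 121.7, 'tick': 1500000121.7}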
@defer.inlineCallbacks
def produce_ticks(self):
while self.running:
try:
self._waiting_for_processor = self.dependencies.wait_for_resource('processor')
processor = yield self._waiting_for_processor
baton = self._create_baton()
self._waiting_for_processor = processor(baton)
yield self._waiting_for_processor
except Exception as e:
                logger.error('Error while waiting for or running the processor', exc_info=True)
finally:
self._waiting_for_processor = None
# we might have stopped running while waiting for the processor to finish processing
if not self.running:
return
# the processing might have taken some time, so subtract the time taken from the interval before waiting
            # we set the minimum sleep time to 0 in order to wait at least one reactor
            # iteration between consecutive batons.
processing_duration = time.time()-self._previous_tick
sleep_time = max(self.interval-processing_duration, 0)
try:
self._sleeping = util.wait(sleep_time)
yield self._sleeping
except defer.CancelledError:
return
finally:
self._sleeping = None
| foundit/Piped | piped/providers/tick_provider.py | Python | mit | 5,219 |
import gdb
import gdb.types
VERSION = "0.11.2-1"
def dumpCBlockIndex(bindex, length, indent = 0):
count = 0
length = int(length)
while bindex != 0 and count < length:
count+=1
obj = bindex.dereference()
gdb.write(" "*indent + str(bindex) + ": height:" + str(obj["nHeight"]) + " tx:" + str(obj["nTx"]) + " status:" + "0x%x" % obj["nStatus"] + " (" + int2BlockStatus(obj["nStatus"]) + ")" + "\n")
bindex = obj["pprev"]
class SpHelp(gdb.Command):
def __init__(self,cmds):
super(SpHelp, self).__init__("btc-help", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
self.cmds = cmds
def help(self):
return "btc-help: This help"
def invoke(self, argument, from_tty):
args = gdb.string_to_argv(argument)
if len(args)>0:
expr = args[0]
arg = gdb.parse_and_eval(expr)
        # TODO: find this command and print help for only that command
for c in self.cmds:
gdb.write(c.help() + "\n")
int2BlockStatusList = ( (gdb.parse_and_eval("BLOCK_VALID_HEADER") , "BLOCK_VALID_HEADER"),
(gdb.parse_and_eval("BLOCK_VALID_TREE") , "BLOCK_VALID_TREE "),
(gdb.parse_and_eval("BLOCK_VALID_CHAIN") , "BLOCK_VALID_CHAIN"),
(gdb.parse_and_eval("BLOCK_VALID_SCRIPTS") , "BLOCK_VALID_SCRIPTS"),
(gdb.parse_and_eval("BLOCK_HAVE_DATA") , "BLOCK_HAVE_DATA"),
(gdb.parse_and_eval("BLOCK_HAVE_UNDO") , "BLOCK_HAVE_UNDO"),
(gdb.parse_and_eval("BLOCK_EXCESSIVE") , "BLOCK_EXCESSIVE"),
(gdb.parse_and_eval("BLOCK_FAILED_VALID") , "BLOCK_FAILED_VALID"),
(gdb.parse_and_eval("BLOCK_FAILED_CHILD") , "BLOCK_FAILED_CHILD") )
def int2BlockStatus(x):
ret = []
for flag,val in int2BlockStatusList:
if x&flag: ret.append(val)
return " | ".join(ret)
class BuDumpCBlockIndex(gdb.Command):
def __init__(self):
super(BuDumpCBlockIndex, self).__init__("btc-dump-bidx", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def help(self):
return "btc-dump-bidx CBlockIndex* count: Dump the CBlockIndex chain count elements deep"
def invoke(self, argument, from_tty):
args = gdb.string_to_argv(argument)
if len(args)!=2:
gdb.write("args:\n CBlockIndex*: pointer to the chain\n number: how far to follow the chain\nexample: btc-dump-bidx pindex 10\n")
return
ptr = gdb.parse_and_eval(args[0])
count = gdb.parse_and_eval(args[1])
dumpCBlockIndex(ptr,count)
sd = BuDumpCBlockIndex()
SpHelp([sd])
gdb.write("Loaded Ensocoin GDB extensions %s.\nRun 'btc-help' for command help\n" % VERSION)
| arruah/ensocoin | src/btcgdb.py | Python | mit | 2,562 |
import json
import xml.etree.ElementTree as ET
from sqlalchemy import create_engine, text
import lib.es as es
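# The action's "query" field is expected to contain XML roughly like the block below;
# the root element name is not checked, and the connection string, host, and index
# names are illustrative placeholders. Note that the update loop further down flushes
# a document whenever the id column changes, so the SQL should normally ORDER BY id.
# <query>
#     <SQL_CONNECTION>mysql+pymysql://user:pass@host/db</SQL_CONNECTION>
#     <SQL>SELECT id, title, body FROM docs ORDER BY id</SQL>
#     <ELASTIC_SEARCH_HOST>http://localhost:9200</ELASTIC_SEARCH_HOST>
#     <INDEX>documents</INDEX>
#     <DOC_TYPE>doc</DOC_TYPE>
# </query>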
# Run Module
def run(p):
AllGood = True
# Read Query and parse parameters
root = ET.fromstring(p["action"]["query"])
SQL_CONNECTION = root.find("SQL_CONNECTION").text
SQL = root.find("SQL").text.strip()
ELASTIC_SEARCH_HOST = root.find("ELASTIC_SEARCH_HOST").text
INDEX = root.find("INDEX").text
DOC_TYPE = root.find("DOC_TYPE").text
# Data Body
SQL_RESULT = None
# Create sql engine
engine = create_engine(SQL_CONNECTION)
# Get SQL connection
with engine.connect() as conn:
try:
p["log"].info(SQL)
SQL_RESULT = conn.execute(text(SQL))
p["log"].success("SQL excuted successfully.")
except Exception, e:
AllGood = False
p["log"].error("SQL execution failed",e)
p["log"].info("start updating document ...")
    row_count = 0; curr_id = None; prev_id = None; body = {}
for r in SQL_RESULT: # check if id exists
if not "id" in r:
AllGood = False
p["log"].log("ERROR", "id doesn't exist\nid: {}\n{}".format(id, str(body)))
break
# save current id
curr_id = r["id"]
# try to detect when id is changed - means new document
if curr_id != prev_id and prev_id:
try:
es.update(ELASTIC_SEARCH_HOST, INDEX, DOC_TYPE, prev_id, body)
row_count += 1
except Exception,e:
p["log"].error("id: {}\n{}".format(curr_id, str(body)), e)
return False
finally:
# reset body content for the next document
body = {}
# Loop each column
for column, value in r.items():
# save id field and continue
if column == "id": continue;
# create column if doesn't exist
if not body.get(column): body[column] = []
# form body content
body[column].append(value)
# Save the current id
prev_id = curr_id
# index the last record
if prev_id:
es.update(ELASTIC_SEARCH_HOST, INDEX, DOC_TYPE, prev_id, body)
row_count += 1
# indexing completed
p["log"].success("Update index completed: {}".format(row_count))
return AllGood
| unkyulee/elastic-cms | src/task/modules/ESUPDATE.py | Python | mit | 2,529 |
# Copyright (c) 2016, Rogerthat
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from gig.authentication import login, logout
from gig.bizz import create_vm_flow_part_accounts, create_vm_flow_part_cloudspaces, create_vm_flow_part_images, \
create_vm_flow_part_info, create_vm, update_user_data, get_machine_console_info
from gig.utils import get_value_from_steps, get_email_and_app_id_from_userdetails
# https://gigbridge.rogerthat.net:4381/callback_api
def test_test(**params):
'''Rogerthat configuration test method.'''
return params['value']
def messaging_poke(tag, service_identity, user_details, **params):
email, app_id = get_email_and_app_id_from_userdetails(user_details)
if tag and tag == u"create_vm":
return create_vm_flow_part_accounts(service_identity, email, app_id)
elif tag and tag == u"console":
update_user_data(service_identity, email, app_id)
return None
def messaging_flow_member_result(flush_id, parent_message_key, service_identity, steps, user_details, **params):
email, app_id = get_email_and_app_id_from_userdetails(user_details)
if flush_id == u"flush_login":
username = get_value_from_steps(u"message_username", steps)
password = get_value_from_steps(u"message_password", steps)
return login(service_identity, email, app_id, username, password)
elif flush_id == u"flush_logout":
return logout(service_identity, email, app_id)
elif flush_id == u"flush_create_vm-part-accounts":
return create_vm_flow_part_cloudspaces(parent_message_key, steps, service_identity, email, app_id)
elif flush_id == u"flush_create_vm-part-cloudspaces":
return create_vm_flow_part_images(parent_message_key, steps, service_identity, email, app_id)
elif flush_id == u"flush_create_vm-part-images":
return create_vm_flow_part_info(parent_message_key, steps, service_identity, email, app_id)
elif flush_id == u"flush_create_vm":
return create_vm(parent_message_key, steps, service_identity, email, app_id)
return None
def system_api_call(email, method, params, tag, service_identity, user_details):
email, app_id = get_email_and_app_id_from_userdetails(user_details)
if method and method == u"machine.getConsoleStartDetails":
return get_machine_console_info(service_identity, email, app_id, params)
return None
def friend_invite_result(user_details, service_identity, **params):
'''Rogerthat callback when a connection is made.'''
email, app_id = get_email_and_app_id_from_userdetails(user_details)
logout(service_identity, email, app_id)
| rogerthat-platform/rogerthat-gig-g8 | src/gig/rogerthat_callbacks.py | Python | bsd-2-clause | 3,882 |
DEBUG = True
HOST = "0.0.0.0"
PORT = 8081
| sudharsh/ragesquared | config.py | Python | apache-2.0 | 42 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssh_local_ca
short_description: SSH proxy local CA in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall_ssh feature and local_ca category.
      Examples include all parameters and values which need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
version_added: 2.9
firewall_ssh_local_ca:
description:
- SSH proxy local CA.
default: null
type: dict
suboptions:
name:
description:
- SSH proxy local CA name.
required: true
type: str
password:
description:
- Password for SSH private key.
type: str
private_key:
description:
- SSH proxy private key, encrypted with a password.
type: str
public_key:
description:
- SSH proxy public key.
type: str
source:
description:
- SSH proxy local CA source type.
type: str
choices:
- built-in
- user
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: SSH proxy local CA.
fortios_firewall_ssh_local_ca:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ssh_local_ca:
name: "default_name_3"
password: "<your_own_value>"
private_key: "<your_own_value>"
public_key: "<your_own_value>"
source: "built-in"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ssh_local_ca_data(json):
option_list = ['name', 'password', 'private_key',
'public_key', 'source']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def firewall_ssh_local_ca(data, fos):
vdom = data['vdom']
state = data['state']
firewall_ssh_local_ca_data = data['firewall_ssh_local_ca']
filtered_data = underscore_to_hyphen(filter_firewall_ssh_local_ca_data(firewall_ssh_local_ca_data))
if state == "present":
return fos.set('firewall.ssh',
'local-ca',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall.ssh',
'local-ca',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall_ssh(data, fos):
if data['firewall_ssh_local_ca']:
resp = firewall_ssh_local_ca(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"firewall_ssh_local_ca": {
"required": False, "type": "dict", "default": None,
"options": {
"name": {"required": True, "type": "str"},
"password": {"required": False, "type": "str"},
"private_key": {"required": False, "type": "str"},
"public_key": {"required": False, "type": "str"},
"source": {"required": False, "type": "str",
"choices": ["built-in", "user"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall_ssh(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| amenonsen/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ssh_local_ca.py | Python | gpl-3.0 | 10,186 |
import random
from locust import HttpUser, task, between
import json
class QuickstartUser(HttpUser):
wait_time = between(5, 9)
test_data = []
@task
def process_text(self):
n = random.randint(0, len(self.test_data) - 1)
paragraph = self.test_data[n]
headers = {"Accept": "application/json"}
files = {'text': str(paragraph)}
self.client.post(url="/quantities/service/processQuantityText", files=files,
headers=headers, name="quantity_text")
# @task(3)
# def view_item(self):
# # item_id = random.randint(1, 10000)
# # self.client.get(f"/item?id={item_id}", name="/item")
# pass
#
def on_start(self):
with open("testData.txt", 'r') as fp:
lines = fp.readlines()
self.test_data.extend(lines)
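# Hypothetical invocation (the target host is an assumption, not taken from this repo):
#   locust -f locustfile.py --host http://localhost:8060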
| kermitt2/grobid-quantities | resources/locust/locustfile.py | Python | apache-2.0 | 849 |
import os
import socket
import sys
import threading
import time
import traceback
import urlparse
import uuid
from .base import (ExecutorException,
Protocol,
RefTestExecutor,
RefTestImplementation,
TestExecutor,
TestharnessExecutor,
testharness_result_converter,
reftest_result_converter,
strip_server)
from ..testrunner import Stop
here = os.path.join(os.path.split(__file__)[0])
webdriver = None
exceptions = None
RemoteConnection = None
extra_timeout = 5
def do_delayed_imports():
global webdriver
global exceptions
global RemoteConnection
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.remote.remote_connection import RemoteConnection
class SeleniumProtocol(Protocol):
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def setup(self, runner):
"""Connect to browser via Selenium's WebDriver implementation."""
self.runner = runner
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
session_started = False
try:
self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
resolve_ip=False),
desired_capabilities=self.capabilities)
except:
self.logger.warning(
"Connecting to Selenium failed:\n%s" % traceback.format_exc())
else:
self.logger.debug("Selenium session started")
session_started = True
if not session_started:
self.logger.warning("Failed to connect to Selenium")
self.executor.runner.send_message("init_failed")
else:
try:
self.after_connect()
except:
print >> sys.stderr, traceback.format_exc()
self.logger.warning(
"Failed to connect to navigate initial page")
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.load_runner("http")
def load_runner(self, protocol):
url = urlparse.urljoin(self.executor.server_url(protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.webdriver.execute_script("document.title = '%s'" %
threading.current_thread().name.replace("'", '"'))
def wait(self):
while True:
try:
self.webdriver.execute_async_script("");
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumRun(object):
def __init__(self, func, webdriver, url, timeout):
self.func = func
self.result = None
self.webdriver = webdriver
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
self.webdriver.set_script_timeout((timeout + extra_timeout) * 1000)
except exceptions.ErrorInResponseException:
self.logger.error("Lost WebDriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
assert not flag
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.webdriver, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.load_runner(new_environment["protocol"])
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.do_testharness,
self.protocol.webdriver,
url,
test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, webdriver, url, timeout):
return webdriver.execute_async_script(
self.script % {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000})
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
self.protocol.webdriver.set_window_size(600, 600)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return SeleniumRun(self._screenshot,
self.protocol.webdriver,
self.test_url(test),
test.timeout).run()
def _screenshot(self, webdriver, url, timeout):
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
        # strip off the "data:image/png;base64," part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
| bzbarsky/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/executors/executorselenium.py | Python | mpl-2.0 | 9,336 |
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def load_from_mat(dataset_path):
import h5py
if dataset_path != '':
img = np.array(h5py.File(dataset_path)['var'])
return img
else:
return None
def gaussian_kernel(kernel_size, sigma):
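    # Each entry is value = 1/sqrt(2*pi*sigma^2) * exp(-(x^2 + y^2) / (2*sigma^2)),
    # with (x, y) measured from the kernel centre. Note this uses the 1-D
    # normalisation factor, so the kernel entries do not sum exactly to 1.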
kernel = np.zeros((int(kernel_size),int(kernel_size)))
center_index_x = float(kernel_size)/2-0.5
center_index_y = float(kernel_size)/2-0.5
for i in range(kernel.shape[0]):
for j in range(kernel.shape[1]):
x = center_index_x - i
y = center_index_y - j
value = (1.000/(np.sqrt(2*np.pi*float(sigma)**2)))*(math.exp(-(float(x)**2+float(y)**2)/(2*float(sigma)**2)))
kernel[i,j] = value
return kernel
def kernel_application(kernel, img):
    img = np.pad(img, ((int(kernel.shape[0]) - 1) // 2), mode='constant')
new_img = np.zeros(np.shape(img))
print(img.shape)
for ii in range(img.shape[0] - (kernel.shape[0]-1)):
for jj in range(img.shape[1] - (kernel.shape[1]- 1)):
sub_array = img[ii:ii+(kernel.shape[0]), jj:jj+(kernel.shape[1])]
new_array = np.multiply(kernel, sub_array)
sum_value = np.sum(new_array)
new_img[ii,jj] = sum_value
new_img = new_img[0:new_img.shape[0]-(int(kernel.shape[0])-1),0:new_img.shape[1]-(int(kernel.shape[0])-1)]
return new_img
# load_from_mat
img = load_from_mat("in.mat")
img = np.transpose(img)
# gaussian_kernel
kernel = gaussian_kernel(31, 3)
# kernel_application
new_img = kernel_application(kernel, img)
# black_background: clamp negative pixel values to zero
for i in range(new_img.shape[0]):
for j in range(new_img.shape[1]):
if new_img[i,j] < 0:
new_img[i,j] = 0
# download
fig = plt.figure(frameon=False)
fig.set_size_inches(new_img.shape[1]/10,new_img.shape[0]/10)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
plt.gray()
ax.imshow(new_img, aspect='normal')
fig.savefig("out.png")
| jevanyu/CEUS-Filters | gaussian_blur.py | Python | gpl-3.0 | 2,002 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, re
from datetime import datetime
from django.shortcuts import render
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils import translation
from django.http import Http404, HttpResponse, JsonResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.core.urlresolvers import reverse
from django.forms.models import formset_factory, modelformset_factory, inlineformset_factory
from django.db import IntegrityError, transaction
from askkit import settings
from users.models import *
from questions.models import *
from questions.forms import *
from core.forms import *
# Thanks to: http://stackoverflow.com/questions/13998901/generating-a-random-hex-color-in-python
def get_html_color_code():
r = lambda: random.randint(0,255)
return '#%02X%02X%02X' % (r(),r(),r())
# Thanks to: http://stackoverflow.com/a/5976065
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def question(request, name, question_token):
# ####################################################################
# ######################### Inner Functions ##########################
# ####################################################################
def colour_replies(replies):
"""
Generate colours for replies.
"""
html_color_obj = [
{'color':"#FDB45C",'highlight':"#FFC870"},
{'color':"#949FB1",'highlight':"#A8B3C5"},
{'color':"#4D5360",'highlight':"#616774"},
{'color':"#F7464A",'highlight':"#FF5A5E"},
{'color':"#46BFBD",'highlight':"#5AD3D1"},
{'color':"#4DA519",'highlight':"#5AD3D1"},
{'color':"#7393E7",'highlight':"#5AD3D1"},
{'color':"#7537CC",'highlight':"#5AD3D1"},
{'color':"#A0A42A",'highlight':"#5AD3D1"},
{'color':"#ACD287",'highlight':"#5AD3D1"},
{'color':"#275055",'highlight':"#5AD3D1"},
{'color':"#AF7210",'highlight':"#5AD3D1"},
]
        # If total_hits remains 0, the ratings are not shown.
total_hits = 0
for idx, reply in enumerate(replies):
color = None
total_hits += reply.hits
try:
color = html_color_obj[idx]['color']
except Exception:
color = get_html_color_code()
reply.color = color
return replies
# ####################################################################
def profile_has_voted(requestProfile, question):
voted = ReplyVotedBy.objects.filter(voter=requestProfile, question=question).count()
if voted == 1:
return True
elif voted == 0:
return False
else:
#Hacked
return False
# ####################################################################
def anonymous_has_voted(fromIp, question):
voted = ReplyVotedBy.objects.filter(fromIp=fromIp, question=question).count()
if voted == 1:
return True
elif voted == 0:
return False
else:
# Hacked. Implement some alert system.
return False
# ####################################################################
def is_voted_by(question, requestProfile, fromIp):
if request.user.is_anonymous():
return anonymous_has_voted(fromIp, question)
else:
return profile_has_voted(requestProfile, question)
# ####################################################################
def show(question, voted):
show = True
hide_not_available_msg = False
if question.data_require_vote and not voted:
show = False
if question.hide_data:
show = False
return (show, hide_not_available_msg)
# ####################################################################
    def comment_empty(commentForm):
return commentForm.cleaned_data['text'].isspace()
# ####################################################################
def profile_owns_question(profile, question):
if profile == question.asker:
return True
else:
return False
# ####################################################################
# #################### question request init #########################
# ####################################################################
try:
ip = get_client_ip(request)
#timezone_cookie = request.COOKIES.get('askkit_timezone').replace('%2F','/')
request_profile = None
pro_owns_ques = False
question = Question.objects.get(token=question_token)
replies = question.replies.all()
if not request.user.is_anonymous():
request_profile = Profile.objects.get(user=request.user)
pro_owns_ques = profile_owns_question(request_profile, question)
# Thanks to: https://wiki.python.org/moin/HowTo/Sorting
replies_ordered = sorted(replies, key=lambda reply: reply.hits, reverse=True)
replies = colour_replies(replies)
voted = is_voted_by(question, request_profile, ip)
makeCommentForm = MakeCommentModelForm()
if request.method == 'POST':
makeCommentForm = MakeCommentModelForm(request.POST)
        if makeCommentForm.is_valid() and request_profile is not None and not comment_empty(makeCommentForm):
valid = True
comment = makeCommentForm.save(commit=False)
comment.question = question
comment.commenter = request_profile
comment.save()
makeCommentForm = MakeCommentModelForm()
except (Profile.DoesNotExist, User.DoesNotExist, Question.DoesNotExist):
raise Http404
except Exception, e:
error = True
template = "questions/question.html"
return render(request, template, locals())
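# vote_reply below expects an AJAX POST carrying the question and reply tokens and
# answers with a JSON status code. Hypothetical exchange (URL name and token values
# are invented for illustration):
#   POST <vote-reply url>   qtoken=<question.token>  rtoken=<reply.token>
#   <-  {"status": 5}       # 5 == voted; the full code list is defined inside the view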
@csrf_protect
def vote_reply(request):
# Checks if ID_value is an integer
def check_token(value):
return True
###################
### ERROR
error_code = 0
###################
### BAD_PARAMS
bad_params_code = 1
###################
### EQUAL_USER
equal_users_code = 2
###################
### ALRY_VOTED
alry_voted_code = 3
###################
    ### REPLY DOESN'T MATCH THE QUESTION
reply_question_doesnt_match = 4
###################
### VOTED
voted = 5
###################
### USED IP
used_ip = 6
###################
### ANONYMOUS VOTER NOT ALLOWED
not_anonymous_voter = 7
###################
### OUT OF DATE
out_of_date = 8
######################################################################
response = None
######################################################################
if request.method == 'POST':
try:
# Stores Request IP
client_ip = get_client_ip(request)
qtoken = request.POST.get("qtoken") or None
rtoken = request.POST.get("rtoken") or None
# Checks incoming params are valid
if check_token(qtoken) and check_token(rtoken):
# DB Search checks that rtoken is a reply of qtoken. If not,
# return error code
try:
reply = Reply.objects.get(token=rtoken, question__token=qtoken)
except Reply.DoesNotExist:
reply = None
# If rtoken is a reply of question. PARAMS OK
if reply:
# Retrieves rtoken's question object.
question = Question.objects.get(token=qtoken)
                    # Checks the vote request is within the question's active dates
if question.is_active():
# Retrieves question's replies objects
replies = question.replies.all()
# If user is anonymous
if request.user.is_anonymous():
                            # If the question allows anonymous votes
if question.allow_anonymous_voter:
# Search if request IP has voted yet.
replyVotedBy = ReplyVotedBy.objects.filter(fromIp=client_ip, reply=replies)
######################### [[REVIEW THIS!!!]] #######################
                                # If someone tries to make an anonymous vote from an IP that
                                # has already been used by an account to vote on this reply, the
                                # above search will retrieve one or more objects and the "if" below
                                # will not allow the IP to vote. This could be worked around by
                                # suggesting that the visitor create an account in the service.
                                #
                                # It is possible to get all registered users' IPs for this question
                                # and compare them with the incoming IP. If any registered user has
                                # used the same IP we could let the anonymous user vote, but this
                                # translates into a two-votes-per-user scenario: one vote made
                                # anonymously and another made by the user account.
####################################################################
# If it has voted
if len(replyVotedBy) > 0:
                                    # IP ALREADY USED ###################################################
response = JsonResponse({'status': used_ip})
else:
try:
with transaction.atomic():
replyVotedBy = ReplyVotedBy(voter=None, reply=reply, question=question, fromIp=client_ip)
reply.hits += 1
question.votes += 1
replyVotedBy.save()
reply.save()
question.save()
except IntegrityError:
#messages.error(request, 'There was an error.')
pass
# ANONYMOUS - VOTED #################################################
response = JsonResponse({'status': voted})
else:
response = JsonResponse({'status': not_anonymous_voter})
else:
# Search if request IP has voted yet.
voter = Profile.objects.get(user=request.user)
repliesVotedBy = ReplyVotedBy.objects.filter(voter=voter, reply=replies, question=question)
# QUESTION already answered
if len(repliesVotedBy) != 0:
# ALREADY_VOTED #####################################################
response = JsonResponse({'status': alry_voted_code})
else:
#resplies = ReplyVotedBy.objects.filter(voter=voter, reply=reply)
try:
with transaction.atomic():
replyVotedBy = ReplyVotedBy(voter=voter, reply=reply, question=question, fromIp=client_ip)
reply.hits += 1
question.votes += 1
replyVotedBy.save()
reply.save()
question.save()
except IntegrityError:
#messages.error(request, 'There was an error.')
pass
# REGISTERED - VOTED ################################################
response = JsonResponse({'status': voted})
else:
# Vote request is out of date
response = JsonResponse({'status': out_of_date})
else:
                    # REPLY DOESN'T MATCH THE QUESTION ###########################################
response = JsonResponse({'status': reply_question_doesnt_match})
else:
response = JsonResponse({'status': bad_params_code})
except Exception as e:
if settings.DEBUG:
response = JsonResponse({'status': error_code, 'info': str(e)})
else:
response = JsonResponse({'status': error_code,})
if settings.DEBUG:
time.sleep(1)
return HttpResponse(response, content_type="application/json")
else:
raise Http404
def question_create(request):
fake_get = False
replies = 2
if request.user.is_anonymous():
request_user = None
# Move to settings files or db.
max_replies = 2
warnings = ['anon']
else:
request_user = Profile.objects.get(user=request.user)
        # Better to move this to settings or the db.
max_replies = settings.MAX_REPLIES_REGISTERED
    # Because replies are added/deleted via a POST request, search the POST params for the relevant key.
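    # Illustrative POST key (the separator character is an assumption): a submit button
    # named 'add_reply_3' yields replies = 3 + 1, i.e. four extra reply forms.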
for key in request.POST:
if key.startswith('add_reply'):
replies = int(key[len('add_reply')+1:])
replies += 1
fake_get = True
break
if key.startswith('delete_reply'):
replies = int(key[len('delete_reply')+1:])
replies -= 1
fake_get = True
break
MakeReplyFormSet = modelformset_factory(Reply, ReplyForm, extra=replies, max_num=max_replies, validate_max=True)
captchaForm = CaptchaForm()
if request.method == 'POST':
makeQuestionForm = QuestionForm(request.POST, prefix='question')
makeReplyFormSet = MakeReplyFormSet(request.POST, prefix='replies')
captchaForm = CaptchaForm(request.POST, prefix='captcha')
makeQuestionForm.form_asker = request_user
        # This avoids the form being reset on the POST save call. STRANGE.
if fake_get:
request.POST['replies-TOTAL_FORMS'] = replies
        # The first operand prevents saving the form on an add/delete-reply call.
if not fake_get and makeQuestionForm.is_valid() and makeReplyFormSet.is_valid() and captchaForm.is_valid():
question = makeQuestionForm.save(commit=False)
question.asker = request_user
question.fromIp = get_client_ip(request)
question.save()
makeQuestionForm.save_m2m()
replies = makeReplyFormSet.save(commit=False)
for reply in replies:
reply.question = question
reply.save()
return HttpResponseRedirect(reverse('question', args=[question.asker or 'anon', question.token]))
if request.method == 'GET':
makeQuestionForm = QuestionForm(prefix='question')
makeReplyFormSet = MakeReplyFormSet(queryset=Reply.objects.filter(question__id=None), prefix='replies')
template = 'questions/create.html'
return render(request,template,locals())
def question_update(request, name, question_token):
    # if an anonymous user tries to edit a question, redirect to the dashboard
if request.user.is_anonymous():
return HttpResponseRedirect(reverse('dashboard'))
else:
        # Checks whether the question to edit belongs to the request user...
question = Question.objects.filter(asker=Profile.objects.filter(user=request.user), token=question_token)
if len(question) == 0:
            # Does not belong
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
elif len(question) > 1:
# ###########################################################################
            # THIS SHOULD NEVER HAPPEN ##################################################
# ###########################################################################
            # There are two or more questions associated with the same user and token.
            # This should store and notify the incident.
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
else:
# Question belongs to request's user
question = question[0]
replies = question.replies.all()
replies_count = len(replies)
fake_get = False
if request.user.is_anonymous():
request_user = None
max_replies = 2
else:
request_user = Profile.objects.get(user=request.user)
max_replies = settings.MAX_REPLIES_REGISTERED
    # Because replies are added/deleted via a POST request, search the POST params for the relevant key.
for key in request.POST:
if key.startswith('add_reply'):
replies_count = int(key[len('add_reply')+1:])
replies_count += 1
fake_get = True
break
if key.startswith('delete_reply'):
replies_count = int(key[len('delete_reply')+1:])
replies_count -= 1
fake_get = True
break
updateQuestionForm = QuestionEditForm(request.POST or None, instance=question, prefix='question')
UpdateReplyFormSet = inlineformset_factory(Question, Reply, form=ReplyForm, extra=0, can_delete=True)
updateReplyFormSet = UpdateReplyFormSet(instance=question)
#captchaForm = CaptchaForm()
if request.method == 'POST':
if fake_get:
request.POST['replies-TOTAL_FORMS'] = replies_count
updateReplyFormSet = UpdateReplyFormSet(request.POST, instance=question)
#captchaForm = CaptchaForm(request.POST, prefix='captcha')
        # The first operand prevents saving the form on an add/delete-reply call.
#if not fake_get and updateQuestionForm.is_valid() and updateReplyFormSet.is_valid() and captchaForm.is_valid():
if not fake_get and updateQuestionForm.is_valid() and updateReplyFormSet.is_valid():
try:
with transaction.atomic():
updateQuestionForm.save()
updateReplyFormSet.save()
except IntegrityError:
#messages.error(request, 'There was an error.')
pass
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
template = 'questions/update.html'
return render(request,template,locals())
def question_make_anon(request, name, question_token):
    # if an anonymous user tries to make a question anonymous, redirect to the dashboard
if request.user.is_anonymous():
return HttpResponseRedirect(reverse('dashboard'))
else:
        # Checks whether the question to make anonymous belongs to the request user...
question = Question.objects.filter(asker=Profile.objects.filter(user=request.user), token=question_token)
if len(question) == 0:
            # Does not belong
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
elif len(question) > 1:
# ###########################################################################
            # THIS SHOULD NEVER HAPPEN ##################################################
# ###########################################################################
            # There are two or more questions associated with the same user and token.
            # This should store and notify the incident.
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
else:
try:
with transaction.atomic():
question = question[0]
question.asker = None
question.fromIp = None
question.data_require_vote = True
question.hide_data = False
question.public = True
######################################
# Think about this ###################
question.allow_anonymous_voter = True
######################################
# Should we generate a new token?
######################################
question.save()
return HttpResponseRedirect(reverse('question', args=['anon', question_token]))
except IntegrityError:
#messages.error(request, 'There was an error.')
pass
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
def question_delete(request, name, question_token):
    # if an anonymous user tries to delete a question, redirect to the dashboard
if request.user.is_anonymous():
return HttpResponseRedirect(reverse('dashboard'))
else:
        # Checks whether the question to delete belongs to the request user...
question = Question.objects.filter(asker=Profile.objects.filter(user=request.user), token=question_token)
if len(question) == 0:
            # Does not belong
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
elif len(question) > 1:
# ###########################################################################
            # THIS SHOULD NEVER HAPPEN ##################################################
# ###########################################################################
            # There are two or more questions associated with the same user and token.
            # This should store and notify the incident.
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
else:
try:
with transaction.atomic():
question = question[0]
# Implement anon copy.
"""
question.id = None
question.asker = None
question.fromIp = None
question.token = 'default'
#question_copied = question.save(commit=False)
# Copy question
question_copied = Question(
asker=None,
fromIp=None,
allow_anonymous_voter = question.allow_anonymous_voter,
context = question.context,
data_require_vote = question.data_require_vote,
date = question.date,
date_begin = question.date_begin,
date_end = question.date_end,
hide_data = question.hide_data,
public = question.public,
question = question.question,
votes = question.votes
)
question_copied.save()
"""
question.delete()
return HttpResponseRedirect(reverse('dashboard'))
except IntegrityError as err:
#messages.error(request, 'There was an error.')
pass
return HttpResponseRedirect(reverse('question', args=[name, question_token]))
# ##########################################################################################
# #################### Class-based views test ###############################################
# ################ ONLY FOR DEVELOPMENT PURPOSES ###########################################
class QuestionDetailView(DetailView):
model = Question
"""
def get_context_data(self, **kwargs):
context = super(QuestionDetailView, self).get_context_data(**kwargs)
context['now'] = timezone.now()
return context
"""
def get_object(self):
return Question.objects.get(token=self.kwargs.get("question_token"))
#return Question.objects.get(name=self.kwargs.get("name"))
class QuestionCreate(CreateView):
form_class = QuestionReplyMultiForm
template_name = 'questions/question_form.html'
"""
def get_context_data(self, **kwargs):
context = super(QuestionCreate, self).get_context_data(**kwargs)
# Add vars to context. For example, to render it in template.
return context
"""
"""
def get_form_kwargs(self):
kwargs = super(QuestionCreate, self).get_form_kwargs()
if self.request.method in ('POST', 'PUT'):
profile = None
if not self.request.user.is_anonymous():
profile = Profile.objects.get(user=self.request.user)
kwargs['initial']['asker'] = profile
kwargs['initial']['fromIp'] = get_client_ip(self.request)
print kwargs
return kwargs
"""
    # Change this method to avoid sending profile and IP data to the client ASAP.
# It should be added to the form in the clean and save method.
def get_initial(self):
profile = None
if not self.request.user.is_anonymous():
profile = Profile.objects.get(user=self.request.user)
return {
'question': {
'asker': profile,
'fromIp': get_client_ip(self.request),
},
'reply': {
# Profile's initial data
},
}
#return {'asker': profile, 'fromIp': get_client_ip(self.request)}
def form_valid(self, form):
# Form is validated and ready to save. Then do the following below.
profile = None
if not self.request.user.is_anonymous():
profile = Profile.objects.get(user=self.request.user)
form.instance.asker = profile
form.instance.fromIp = get_client_ip(self.request)
return super(QuestionCreate, self).form_valid(form)
def get_success_url(self):
return reverse('question', args=[self.object.asker or 'anon', self.object.token])
class QuestionEdit(UpdateView):
#model = Question
form_class = QuestionEditForm
#fields = [ 'question', 'context', 'date_begin', 'date_end', 'allow_anonymous_voter', 'data_require_vote', 'hide_data', 'public',]
template_name = 'questions/update.html'
def dispatch(self, *args, **kwargs):
if self.request.user.is_anonymous():
return HttpResponseRedirect(reverse('dashboard'))
else:
question = Question.objects.filter(asker=Profile.objects.filter(user=self.request.user), token=self.kwargs.get("question_token"))
if len(question) == 0:
return HttpResponseRedirect(reverse('question', args=[self.kwargs.get("name"), self.kwargs.get("question_token")]))
elif len(question) > 1:
# ###########################################################################
                # THIS SHOULD NEVER HAPPEN ##################################################
# ###########################################################################
                # There are two or more questions associated with the same user and token.
                # This should store and notify the incident.
return HttpResponseRedirect(reverse('question', args=[self.kwargs.get("name"), self.kwargs.get("question_token")]))
else:
# Question belongs to request's user
pass
return super(QuestionEdit, self).dispatch(*args, **kwargs)
def get_object(self):
return Question.objects.get(token=self.kwargs.get("question_token"))
#return Question.objects.get(name=self.kwargs.get("name"))
def get_success_url(self):
return reverse('question', args=[self.kwargs.get("name"), self.kwargs.get("question_token")])
| sergiorb/askkit | questions/views.py | Python | apache-2.0 | 23,916 |
import unittest
import chord
import tests.commons
class TestInitFingers(unittest.TestCase):
def setUp(self):
self.nodes = tests.commons.createlocalnodes(2, stabilizer=False)
def tearDown(self):
tests.commons.stoplocalnodes(self.nodes)
def test_init_fingers(self):
self.nodes[1].init_fingers(chord.NodeInterface(self.nodes[0].asdict()))
self.assertEqual(self.nodes[1].successor.uid, self.nodes[0].uid)
self.assertEqual(self.nodes[0].successor.uid, self.nodes[1].uid)
self.assertEqual(self.nodes[0].predecessor.uid, self.nodes[1].uid)
self.assertEqual(self.nodes[1].predecessor.uid, self.nodes[0].uid)
for i in range(0, self.nodes[0].uid.idlength):
if self.nodes[1].fingers[i].key.isbetween(self.nodes[1].uid, self.nodes[0].uid):
self.assertEqual(self.nodes[1].fingers[i].respNode.uid, self.nodes[0].uid)
else:
self.assertEqual(self.nodes[1].fingers[i].respNode.uid, self.nodes[1].uid)
| Titotix/pychord | tests/test_init_finger.py | Python | gpl-3.0 | 1,019 |
from abc import *
import re
import collections
import sys
import ipaddress
from curses.ascii import isgraph
from enum import Enum, Flag, IntEnum
from point.gemini_exceptions import *
# TODO: print command/response class name in exception messages more often / more consistently
####################################################################################################
_re_int = re.compile(r'^([-+]?)(\d+)$', re.ASCII)
_re_ang_dbl = re.compile(r'^([-+]?)(\d{1,3}\.\d{6})$', re.ASCII)
_re_ang_high = re.compile(r'^([-+]?)(\d{1,2}):(\d{1,2}):(\d{1,2})$', re.ASCII)
_re_ang_low = re.compile(r'^([-+]?)(\d{1,3})' + '\xDF' + r'(\d{1,2})$', re.ASCII)
_re_time_dbl = re.compile(r'^([-+]?)(\d+\.\d{6})$', re.ASCII)
_re_time_hilo = re.compile(r'^(\d{1,2}):(\d{1,2}):(\d{1,2})$', re.ASCII)
_re_revisions = re.compile(r'^.{8}$', re.ASCII)
_re_ipv4addr = re.compile(r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$', re.ASCII)
def parse_int(string):
match = _re_int.fullmatch(string)
if match is None: raise G2ResponseIntegerParseError(string)
return int(match.expand(r'\1\2'))
def parse_int_bounds(string, bound_min, bound_max):
if bound_min > bound_max:
        raise G2ResponseParseError('bound_min {} > bound_max {}'.format(bound_min, bound_max))
val = parse_int(string)
if val < bound_min or val > bound_max:
raise G2ResponseIntegerBoundsViolation(val, bound_min, bound_max)
return val
def parse_ang_dbl(string):
match = _re_ang_dbl.fullmatch(string)
if match is None: raise G2ResponseAngleParseError(string, 'double')
return float(match.expand(r'\1\2'))
def parse_ang_high(string):
match = _re_ang_high.fullmatch(string)
if match is None: raise G2ResponseAngleParseError(string, 'high')
f_deg = float(match.expand(r'\1\2'))
f_min = float(match.expand(r'\1\3'))
f_sec = float(match.expand(r'\1\4'))
return (f_deg + (f_min / 60.0) + (f_sec / 3600.0))
def parse_ang_low(string):
match = _re_ang_low.fullmatch(string)
if match is None: raise G2ResponseAngleParseError(string, 'low')
f_deg = float(match.expand(r'\1\2'))
f_min = float(match.expand(r'\1\3'))
return (f_deg + (f_min / 60.0))
def parse_ang(string, precision):
if not isinstance(precision, G2Precision):
raise G2ResponseParseError('parse_ang: not isinstance(precision, G2Precision)')
if precision == G2Precision.DOUBLE: return parse_ang_dbl (string)
elif precision == G2Precision.HIGH: return parse_ang_high(string)
elif precision == G2Precision.LOW: return parse_ang_low (string)
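# Illustrative sketch (not part of the original module): the three angle formats accepted
# by parse_ang() above. The sample strings are assumptions written to match the regexes,
# not captured Gemini 2 responses; this helper is never called by the module itself.
def _example_parse_angles():
    assert parse_ang('123.456789', G2Precision.DOUBLE) == 123.456789       # double precision
    assert abs(parse_ang('12:30:18', G2Precision.HIGH) - 12.505) < 1e-9    # HH:MM:SS form
    assert parse_ang('45\xDF30', G2Precision.LOW) == 45.5                  # DDD<0xDF>MM form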
def parse_time_dbl(string):
match = _re_time_dbl.fullmatch(string)
if match is None: raise G2ResponseTimeParseError(string, 'double')
return float(match.expand(r'\1\2'))
def parse_time_hilo(string):
match = _re_time_hilo.fullmatch(string)
if match is None: raise G2ResponseTimeParseError(string, 'high/low')
i_hour = int(match[1])
i_min = int(match[2])
i_sec = int(match[3])
# TODO: bounds check on hour field...? and should we even be limiting the hour field to 2 digits in the RE?
if i_min >= 60 or i_sec >= 60: raise G2ResponseTimeParseError(string, 'high/low')
return float((i_hour * 3600) + (i_min * 60) + i_sec)
def parse_time(string, precision):
if not isinstance(precision, G2Precision):
raise G2ResponseParseError('parse_time: not isinstance(precision, G2Precision)')
if precision == G2Precision.DOUBLE: return parse_time_dbl (string)
else: return parse_time_hilo(string)
def parse_revisions(string):
match = _re_revisions.fullmatch(string)
if match is None: raise G2ResponseRevisionsParseError(string)
vals = []
for char in string:
val = ord(char)
if val < 0x30 or val > 0x7E: raise G2ResponseRevisionsParseError(string)
vals.append(val - 0x30)
if len(vals) != 8: raise G2ResponseRevisionsParseError(string)
return vals
def parse_ip4vaddr(string):
match = _re_ipv4addr.fullmatch(string)
if match is None: raise G2ResponseIPv4AddressParseError(string)
if int(match[1]) < 0 or int(match[1]) > 255: raise G2ResponseIPv4AddressParseError(string)
if int(match[2]) < 0 or int(match[2]) > 255: raise G2ResponseIPv4AddressParseError(string)
if int(match[3]) < 0 or int(match[3]) > 255: raise G2ResponseIPv4AddressParseError(string)
if int(match[4]) < 0 or int(match[4]) > 255: raise G2ResponseIPv4AddressParseError(string)
return ipaddress.IPv4Address(string)
####################################################################################################
# returns tuple: (float:sign[-1.0|0.0|+1.0], int:hour, int:min, int:sec)
def ang_to_hourminsec(ang):
return ang_to_degminsec(ang * 24.0 / 360.0)
# returns tuple: (float:sign[-1.0|0.0|+1.0], int:deg, int:min, int:sec)
def ang_to_degminsec(ang):
if ang > 0.0: sign = +1.0
elif ang < 0.0: sign = -1.0
else: sign = 0.0
ang = abs(ang) * 3600.0
i_sec = int(ang % 60.0) # TODO: change this to round(), if we can fix the round-up-to-60 issues
ang /= 60.0
i_min = int(ang % 60.0)
ang /= 60.0
i_deg = int(ang)
return (sign, i_deg, i_min, i_sec)
# returns tuple: (float:sign[-1.0|0.0|+1.0], int:deg, int:min)
def ang_to_degmin(ang):
if ang > 0.0: sign = +1.0
elif ang < 0.0: sign = -1.0
else: sign = 0.0
ang = abs(ang) * 60.0
i_min = int(ang % 60.0) # TODO: change this to round(), if we can fix the round-up-to-60 issues
ang /= 60.0
i_deg = int(ang)
return (sign, i_deg, i_min)
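# Illustrative sketch (not part of the original module): worked examples for the conversion
# helpers above, using values that are exactly representable in binary so the int()
# truncation issue noted in the TODOs does not come into play. The expected tuples follow
# from the arithmetic, not from the Gemini documentation, and this helper is never called.
def _example_angle_conversions():
    assert ang_to_degminsec(10.5) == (1.0, 10, 30, 0)     # 10.5 deg -> 10d 30m 00s
    assert ang_to_degmin(-30.25) == (-1.0, 30, 15)        # the sign is carried separately
    assert ang_to_hourminsec(180.0) == (1.0, 12, 0, 0)    # 180 deg -> 12h on the RA axis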
####################################################################################################
class Gemini2Command(ABC):
# IMPLEMENTED AT THE PROTOCOL-SPECIFIC SUBCLASS LEVEL (LX200, Native, etc)
# purpose: takes info from the command-specific subclass and turns it into a raw cmd string with
# prefix, postfix, checksum, etc that's completely ready to be shoved onto the backend
# return: string containing fully encoded raw command with prefix, postfix, checksum, etc
@abstractmethod
def encode(self): pass
# return: a Gemini2Response-derived object if this command expects a response
# return: None if this command does not expect a response
@abstractmethod
def response(self): pass
# return: False if this particular command is not valid for the given backend type
def valid_for_serial(self): return True
def valid_for_udp(self): return True
# shared code between subclasses
def _check_bad_chars(self, string, bad_chars):
for char in bad_chars:
if char in string:
if isgraph(char):
raise G2CommandBadCharacterError('command {:s}: '
'contains \'{}\''.format(self.__class__.__name__, char))
else:
raise G2CommandBadCharacterError('command {:s}: '
'contains \'\\x{:02X}\''.format(self.__class__.__name__, ord(char)))
# ==================================================================================================
class Gemini2Command_ACK(Gemini2Command):
def encode(self):
return '\x06'
# --------------------------------------------------------------------------------------------------
class Gemini2Command_Macro(Gemini2Command):
def encode(self):
return self.cmd_str()
# return: the character(s) to send for this macro command
@abstractmethod
def cmd_str(self): pass
# --------------------------------------------------------------------------------------------------
class Gemini2Command_LX200(Gemini2Command):
def encode(self):
cmd_str = self.lx200_str()
self._check_validity(cmd_str)
return ':{:s}#'.format(cmd_str)
# IMPLEMENTED AT THE COMMAND-SPECIFIC SUBCLASS LEVEL (Echo etc)
# purpose: takes params supplied via the ctor or otherwise (if any) and builds the basic cmd str
# return: string containing essential cmd info characters
@abstractmethod
def lx200_str(self): pass
def _check_validity(self, cmd_str):
# TODO: do a more rigorous valid-character-range check here
self._check_bad_chars(cmd_str, ['#', '\x00', '\x06'])
class Gemini2Command_LX200_NoReply(Gemini2Command_LX200):
def response(self):
return None
# --------------------------------------------------------------------------------------------------
class Gemini2Command_Native(Gemini2Command):
def encode(self):
assert isinstance(self.native_id(), int)
params_str = self._make_params_str(self.native_params())
cmd_str = '{:s}{:d}:{:s}'.format(self.native_prefix(), self.native_id(), params_str)
return '{:s}{:s}#'.format(cmd_str, chr(self._compute_checksum(cmd_str)))
# IMPLEMENTED AT THE COMMAND-SPECIFIC SUBCLASS LEVEL (GetMountType etc)
# return: native command ID number
@abstractmethod
def native_id(self): pass
# IMPLEMENTED AT THE COMMAND-SPECIFIC SUBCLASS LEVEL (GetMountType etc)
# return: None if no parameters are to be sent along with the command
# return: a parameter, or list-of-parameters, to be sent along with the command
def native_params(self):
return None
@abstractmethod
def native_prefix(self): pass
def _make_params_str(self, params):
assert sys.version_info[0] >= 3 # our string type check below is incompatible with Python 2
if params is None:
return ''
elif isinstance(params, collections.abc.Iterable) and (not isinstance(params, str)):
for param in params:
self._check_validity(str(param))
return ':'.join(params)
else:
self._check_validity(str(params))
return str(params)
def _check_validity(self, param_str):
# TODO: do a more rigorous valid-character-range check here
self._check_bad_chars(param_str, ['<', '>', ':', '#', '\x00', '\x06'])
# TODO: move this to somewhere common between cmd and response
def _compute_checksum(self, cmd_str):
csum = 0
for char in cmd_str:
csum = csum ^ ord(char)
csum = (csum % 128) + 64
assert csum >= 0x40 and csum < 0xC0
return csum
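# Illustrative sketch (not part of the original module): the native-command checksum as a
# standalone function with one worked value. The sample payload '<99:' is an assumption
# chosen only to demonstrate the arithmetic; this helper is never called by the module.
def _example_native_checksum(cmd_str='<99:'):
    csum = 0
    for char in cmd_str:
        csum ^= ord(char)        # XOR every character of the un-terminated command string
    csum = (csum % 128) + 64     # fold into the range 0x40..0xBF, as in _compute_checksum()
    # 0x3C ^ 0x39 ^ 0x39 ^ 0x3A == 0x06, and 0x06 + 0x40 == 0x46, so '<99:' checksums to 'F'
    # and the fully encoded command would be '<99:F#'.
    return chr(csum)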
class Gemini2Command_Native_Get(Gemini2Command_Native):
def native_prefix(self):
return '<'
class Gemini2Command_Native_Set(Gemini2Command_Native):
def native_prefix(self):
return '>'
# TODO: verify whether this is correct, or if SET's ever respond with stuff
def response(self):
return None
####################################################################################################
class Gemini2Response(ABC):
DecoderType = Enum('DecoderType', ['FIXED_LENGTH', 'HASH_TERMINATED', 'SEMICOLON_DELIMITED'])
class Decoder(ABC):
def __init__(self, type, zero_len_hack=False):
self._type = type
self._zero_len_hack = zero_len_hack
def type(self):
return self._type
# whether we want to be able to process possibly-zero-length responses
# (this requires a bunch of extra hack garbage in the serial backend)
def zero_len_hack(self):
return self._zero_len_hack
# return: tuple: ([decoded_str OR list-of-decoded_strs], num_chars_processed)
@abstractmethod
def decode(self, chars): pass
class FixedLengthDecoder(Decoder):
def __init__(self, fixed_len, zero_len_hack=False):
super().__init__(Gemini2Response.DecoderType.FIXED_LENGTH, zero_len_hack)
assert fixed_len >= 0
self._fixed_len = fixed_len
def fixed_len(self):
return self._fixed_len
def decode(self, chars):
idx = self.fixed_len()
if len(chars) < idx:
raise G2ResponseTooShortError(len(chars), idx)
return (chars[:idx], idx)
class HashTerminatedDecoder(Decoder):
def __init__(self):
super().__init__(Gemini2Response.DecoderType.HASH_TERMINATED)
def decode(self, chars):
idx = chars.find('#')
if idx == -1:
raise G2ResponseMissingTerminatorError(len(chars))
return (chars[:idx], idx + 1)
# SERIOUS ISSUE: the 'revisions' (native #97) field contains chars in the range of 0x30 ~ 0x7E,
# inclusive; this happens to include the semicolon character. so we end up spuriously
# interpreting revision chars as field delimiters in those cases!
# TEMPORARY WORKAROUND:
# - SemicolonDelimitedDecoder.decode:
# - remove assertion for number of fields
# - replace total_len calculation with fake calculation
# - G2Rsp_MacroENQ.interpret:
# - remove parsing of "later" fields, since we don't CURRENTLY need them
# TODO: report this to Rene!
class SemicolonDelimitedDecoder(Decoder):
def __init__(self, num_fields):
super().__init__(Gemini2Response.DecoderType.SEMICOLON_DELIMITED)
assert num_fields >= 0
self._num_fields = num_fields
def num_fields(self):
return self._num_fields
def decode(self, chars):
fields = chars.split(';', self._num_fields)
if len(fields) <= self._num_fields:
raise G2ResponseTooFewDelimitersError(len(chars), len(fields), self._num_fields)
# assert len(fields) == self._num_fields + 1
fields = fields[:-1]
# total_len = (len(fields) + sum(len(field) for field in fields))
total_len = len(chars) # !!! REMOVE ME !!!
return (fields, total_len)
def __init__(self, cmd):
assert isinstance(cmd, Gemini2Command)
self._cmd = cmd
self._decoded = False
# return: an instance of one of the Decoder subclasses
@abstractmethod
def decoder(self): pass
# input: string containing this response, and potentially additional responses to other commands
# return: integer representing how many characters from the input were decoded for this response
def decode(self, chars):
assert not self._decoded
self._decoded = True
(resp_data, num_chars_processed) = self.decoder().decode(chars)
self._resp_data = self.post_decode(resp_data)
self.interpret()
return num_chars_processed
# purpose: optionally implement this to do some additional post-decode-step verification
def post_decode(self, chars):
return chars
# purpose: optionally implement this to do cmd-specific interpretation of the response string
def interpret(self): pass
def command(self):
return self._cmd
# return: raw response string (or list-of-strings, in the semicolon-delimited case)
def get_raw(self):
assert self._decoded
return self._resp_data
# purpose: optionally override this to return interpreted data instead of the raw response str(s)
def get(self):
return self.get_raw()
# ==================================================================================================
class Gemini2Response_ACK(Gemini2Response):
def decoder(self):
return self.HashTerminatedDecoder()
# --------------------------------------------------------------------------------------------------
class Gemini2Response_Macro(Gemini2Response):
def decoder(self):
return self.SemicolonDelimitedDecoder(self.field_count())
# return: number of semicolon-separated fields expected from this macro response
@abstractmethod
def field_count(self): pass
# --------------------------------------------------------------------------------------------------
class Gemini2Response_LX200(Gemini2Response):
def decoder(self):
return self.HashTerminatedDecoder()
# --------------------------------------------------------------------------------------------------
class Gemini2Response_LX200_FixedLength(Gemini2Response_LX200):
def decoder(self):
return self.FixedLengthDecoder(self.fixed_len())
@abstractmethod
def fixed_len(self): pass
class Gemini2Response_LX200_FixedLengthOrZero(Gemini2Response_LX200_FixedLength):
def decoder(self):
return self.FixedLengthDecoder(self.fixed_len(), True)
# --------------------------------------------------------------------------------------------------
class Gemini2Response_Native(Gemini2Response):
def decoder(self):
return self.HashTerminatedDecoder()
def post_decode(self, chars):
if len(chars) < 1: return
csum_recv = ord(chars[-1])
csum_comp = self.command()._compute_checksum(chars[:-1])
if csum_recv != csum_comp:
raise G2ResponseChecksumMismatchError(csum_recv, csum_comp)
return chars[:-1]
# def get(self):
# # TODO: need to return our post-processed string, not the raw string
# pass
# TODO: implement generic G2-Native response decoding
####################################################################################################
## Commands
# All commands in the following sections are placed in the same order as
# they appear in the serial command reference page:
# http://www.gemini-2.com/web/L5V2_1serial.html
### Enumerations, Constants, etc
# (used in a variety of locations)
# parameter for GetPrecision
class G2Precision(Enum):
DOUBLE = 'DBL PRECISION'
HIGH = 'HIGH PRECISION'
LOW = 'LOW PRECISION'
# parameter for StartupCheck
class G2StartupStatus(Enum):
INITIAL = 'B'
MODE_SELECT = 'b'
COLD_START = 'S'
DONE_EQUATORIAL = 'G'
DONE_ALTAZ = 'A'
# parameter for SelectStartupMode
class G2StartupMode(Enum):
COLD_START = 'C'
WARM_START = 'W'
WARM_RESTART = 'R'
# parameter for MacroENQ [fields 'vel_max', 'vel_x', 'vel_y']
class G2AxisVelocity(Enum):
STALL = '!'
NO_MOVEMENT = 'N'
SLEWING = 'S'
CENTERING = 'C'
TRACKING = 'T'
GUIDING = 'G'
UNDEFINED = '?'
# parameter for MacroENQ [field 'ha_pos']
class G2AxisPosition(Enum):
LOWER_SIDE = 'W'
HIGHER_SIDE = 'E'
# parameter for MacroENQ [field 'park_state']
class G2ParkStatus(Enum):
NOT_PARKED = 0
PARKED = 1
PARKING = 2
# parameter for MacroENQ [field 'pec_state']
# parameter for PECStatus_Set
# response for PECStatus_Get
class G2PECStatus(Flag):
ACTIVE = (1 << 0)
FRESH_DATA_AVAILABLE = (1 << 1)
TRAINING_IN_PROGRESS = (1 << 2)
TRAINING_COMPLETED = (1 << 3)
TRAINING_STARTS_SOON = (1 << 4)
DATA_AVAILABLE = (1 << 5)
# parameter for MacroENQ [field 'cmd99_state']
class G2Status(Flag):
SCOPE_IS_ALIGNED = (1 << 0)
MODELLING_IN_USE = (1 << 1)
OBJECT_IS_SELECTED = (1 << 2)
GOTO_OPERATION_ONGOING = (1 << 3)
RA_LIMIT_REACHED = (1 << 4)
ASSUMING_J2000_OBJ_COORDS = (1 << 5)
# indexes for MacroENQ [field 'revisions']
class G2Revision(IntEnum):
SITE = 0
DATE_TIME = 1
MOUNT_PARAM = 2
DISPLAY_CONTENT = 3
MODEL_PARAM = 4
SPEEDS = 5
PARK = 6
RESERVED = 7
# parameter for MacroENQ [fields 'servo_lag_x', 'servo_lag_y']
G2_SERVO_LAG_MIN = -390
G2_SERVO_LAG_MAX = 390
def parse_servo_lag(string): return parse_int_bounds(string, G2_SERVO_LAG_MIN, G2_SERVO_LAG_MAX)
# parameter for MacroENQ [fields 'servo_duty_x', 'servo_duty_y']
G2_SERVO_DUTY_MIN = -100
G2_SERVO_DUTY_MAX = 100
def parse_servo_duty(string): return parse_int_bounds(string, G2_SERVO_DUTY_MIN, G2_SERVO_DUTY_MAX)
# response for SetObjectRA and SetObjectDec
class G2Valid(Enum):
INVALID = '0'
VALID = '1'
# parameter for RA_StartStop_Set
# parameter for DEC_StartStop_Set
class G2Stopped(Enum):
STOPPED = 0
NOT_STOPPED = 1
# limits for signed 32-bit integer parameters
SINT32_MIN = -((1 << 31) - 0)
SINT32_MAX = ((1 << 31) - 1)
# limits for unsigned 32-bit integer parameters
UINT32_MIN = 0
UINT32_MAX = ((1 << 32) - 1)
### Special Commands
class G2Cmd_StartupCheck(Gemini2Command_ACK):
def response(self): return G2Rsp_StartupCheck(self)
class G2Rsp_StartupCheck(Gemini2Response_ACK):
def interpret(self): self._status = G2StartupStatus(self.get_raw()) # raises ValueError if the response value isn't in the enum
def get(self): return self._status
class G2Cmd_SelectStartupMode(Gemini2Command_LX200_NoReply):
def __init__(self, mode):
if not isinstance(mode, G2StartupMode):
raise G2CommandParameterTypeError('G2StartupMode')
self._mode = mode
def lx200_str(self): return 'b{:s}'.format(self._mode.value)
### Macro Commands
class G2Cmd_MacroENQ(Gemini2Command_Macro):
def cmd_str(self): return '\x05'
def response(self): return G2Rsp_MacroENQ(self)
def valid_for_serial(self): return False # only valid on UDP backend
class G2Rsp_MacroENQ(Gemini2Response_Macro):
def field_count(self): return 21
def interpret(self):
# TODO: implement some range checking on most of the numerical fields here
# (e.g. angle ranges: [0,180) or [-90,+90] or [0,360) etc)
fields = self.get_raw()
self._values = dict()
# self._values['phys_x'] = parse_int (fields[ 0]) # raises G2ResponseIntegerParseError on failure
# self._values['phys_y'] = parse_int (fields[ 1]) # raises G2ResponseIntegerParseError on failure
self._values['pra'] = parse_int (fields[ 0]) # raises G2ResponseIntegerParseError on failure
self._values['pdec'] = parse_int (fields[ 1]) # raises G2ResponseIntegerParseError on failure
self._values['ra'] = parse_ang_dbl (fields[ 2]) # raises G2ResponseAngleParseError on failure
self._values['dec'] = parse_ang_dbl (fields[ 3]) # raises G2ResponseAngleParseError on failure
self._values['ha'] = parse_ang_dbl (fields[ 4]) # raises G2ResponseAngleParseError on failure
self._values['az'] = parse_ang_dbl (fields[ 5]) # raises G2ResponseAngleParseError on failure
self._values['alt'] = parse_ang_dbl (fields[ 6]) # raises G2ResponseAngleParseError on failure
self._values['vel_max'] = G2AxisVelocity (fields[ 7]) # raises ValueError if the response field value isn't in the enum
self._values['vel_x'] = G2AxisVelocity (fields[ 8]) # raises ValueError if the response field value isn't in the enum
self._values['vel_y'] = G2AxisVelocity (fields[ 9]) # raises ValueError if the response field value isn't in the enum
self._values['ha_pos'] = G2AxisPosition (fields[10]) # raises ValueError if the response field value isn't in the enum
self._values['t_sidereal'] = parse_time_dbl (fields[11]) # raises G2ResponseTimeParseError on failure
self._values['park_state'] = G2ParkStatus (int(fields[12])) # raises ValueError if the response field value isn't in the enum
self._values['pec_state'] = G2PECStatus (int(fields[13])) # raises ValueError if the response field value isn't in the enum
self._values['t_wsl'] = parse_time_dbl (fields[14]) # raises G2ResponseTimeParseError on failure
self._values['cmd99_state'] = G2Status (int(fields[15])) # raises ValueError if the response field value isn't in the enum
# self._values['revisions'] = parse_revisions (fields[16]) # raises G2ResponseRevisionsParseError on failure
# self._values['servo_lag_x'] = parse_servo_lag (fields[17]) # raises G2ResponseIntegerParseError or G2ResponseIntegerBoundsViolation on failure
# self._values['servo_lag_y'] = parse_servo_lag (fields[18]) # raises G2ResponseIntegerParseError or G2ResponseIntegerBoundsViolation on failure
# self._values['servo_duty_x'] = parse_servo_duty (fields[19]) # raises G2ResponseIntegerParseError or G2ResponseIntegerBoundsViolation on failure
# self._values['servo_duty_y'] = parse_servo_duty (fields[20]) # raises G2ResponseIntegerParseError or G2ResponseIntegerBoundsViolation on failure
def get(self): return self._values
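# Illustrative sketch (not part of the original module): reading a few fields out of a
# decoded MacroENQ response. The caller is assumed to have sent the command through a UDP
# backend (not shown here) and already fed the raw reply to rsp.decode().
def _example_read_enq_fields(rsp):
    values = rsp.get()   # dict populated by G2Rsp_MacroENQ.interpret()
    tracking = (values['vel_x'] == G2AxisVelocity.TRACKING)
    return values['ra'], values['dec'], values['park_state'], tracking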
### Synchronization Commands
class G2Cmd_Echo(Gemini2Command_LX200):
def __init__(self, char):
assert sys.version_info[0] >= 3 # our string type check below is incompatible with Python 2
if (not isinstance(char, str)) or (len(char) != 1):
raise G2CommandParameterTypeError('char')
self._char = char
def lx200_str(self): return 'CE{:s}'.format(self._char)
def response(self): return G2Rsp_Echo(self)
class G2Rsp_Echo(Gemini2Response_LX200): pass
class G2Cmd_AlignToObject(Gemini2Command_LX200):
def lx200_str(self): return 'Cm'
def response(self): return G2Rsp_AlignToObject(self)
class G2Rsp_AlignToObject(Gemini2Response_LX200):
def interpret(self):
if self.get_raw() == 'No object!': raise G2ResponseInterpretationFailure()
class G2Cmd_SyncToObject(Gemini2Command_LX200):
def lx200_str(self): return 'CM'
def response(self): return G2Rsp_SyncToObject(self)
class G2Rsp_SyncToObject(Gemini2Response_LX200):
def interpret(self):
if self.get_raw() == 'No object!': raise G2ResponseInterpretationFailure()
# ...
### Focus Control Commands
# ...
### Get Information Commands
# ...
### Park Commands
# ...
### Move Commands
# ...
### Precision Guiding Commands
# ...
### Object/Observing/Output Commands
class G2Cmd_SetObjectName(Gemini2Command_LX200_NoReply):
def __init__(self, name):
if name == '': raise G2CommandParameterValueError('name cannot be empty')
if '#' in name: raise G2CommandParameterValueError('name cannot contain \'#\' characters')
self._name = name
def lx200_str(self): return 'ON{:s}'.format(self._name)
# ...
### Precession and Refraction Commands
# ...
### Precision Commands
class G2Cmd_GetPrecision(Gemini2Command_LX200):
def lx200_str(self): return 'P'
def response(self): return G2Rsp_GetPrecision(self)
class G2Rsp_GetPrecision(Gemini2Response_LX200_FixedLength):
def fixed_len(self): return 14
def interpret(self): self._precision = G2Precision(self.get_raw()) # raises ValueError if the response value isn't in the enum
def get(self): return self._precision
class G2Cmd_TogglePrecision(Gemini2Command_LX200_NoReply):
def lx200_str(self): return 'U'
class G2Cmd_SetDblPrecision(Gemini2Command_LX200_NoReply):
def lx200_str(self): return 'u'
### Quit Motion Commands
# ...
### Rate Commands
# ...
### Set Commands
class G2Cmd_SetObjectRA(Gemini2Command_LX200):
def __init__(self, ra):
if ra < 0.0 or ra >= 360.0:
raise G2CommandParameterValueError('ra must be >= 0.0 and < 360.0')
_, self._hour, self._min, self._sec = ang_to_hourminsec(ra)
def lx200_str(self): return 'Sr{:02d}:{:02d}:{:02d}'.format(self._hour, self._min, self._sec)
def response(self): return G2Rsp_SetObjectRA(self)
class G2Rsp_SetObjectRA(Gemini2Response_LX200_FixedLength):
def fixed_len(self): return 1
def interpret(self):
validity = G2Valid(self.get_raw()) # raises ValueError if the response field value isn't in the enum
if validity != G2Valid.VALID: raise G2ResponseInterpretationFailure()
class G2Cmd_SetObjectDec(Gemini2Command_LX200):
def __init__(self, dec):
if dec < -90.0 or dec > 90.0:
raise G2CommandParameterValueError('dec must be >= -90.0 and <= 90.0')
sign, self._deg, self._min, self._sec = ang_to_degminsec(dec)
self._signchar = '+' if sign >= 0.0 else '-'
def lx200_str(self): return 'Sd{:s}{:02d}:{:02d}:{:02d}'.format(self._signchar, self._deg, self._min, self._sec)
def response(self): return G2Rsp_SetObjectDec(self)
class G2Rsp_SetObjectDec(Gemini2Response_LX200_FixedLength):
def fixed_len(self): return 1
def interpret(self):
validity = G2Valid(self.get_raw()) # raises ValueError if the response field value isn't in the enum
if validity != G2Valid.VALID: raise G2ResponseInterpretationFailure()
# NOTE: only objects which are currently above the horizon are considered valid
class G2Cmd_SetSiteLongitude(Gemini2Command_LX200):
def __init__(self, lon):
if lon <= -360.0 or lon >= 360.0:
raise G2CommandParameterValueError('lon must be > -360.0 and < 360.0')
sign, self._deg, self._min = ang_to_degmin(lon)
# everyone else in the world uses positive to mean eastern longitudes; but not LX200!
self._signchar = '-' if sign >= 0.0 else '+'
def lx200_str(self): return 'Sg{:s}{:03d}*{:02d}'.format(self._signchar, self._deg, self._min)
def response(self): return G2Rsp_SetSiteLongitude(self)
class G2Rsp_SetSiteLongitude(Gemini2Response_LX200_FixedLengthOrZero):
def fixed_len(self): return 1
def interpret(self):
if len(self.get_raw()) == 0: raise G2ResponseInterpretationFailure() # invalid
if self.get_raw() != '1': raise G2ResponseInterpretationFailure() # ???
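# Illustrative sketch (not part of the original module): the longitude sign flip noted in
# the comment above. This assumes the caller passes longitude in the usual positive-east
# convention, so a site at 122.25 degrees west is given as -122.25 and emitted with '+'.
def _example_set_site_longitude():
    cmd = G2Cmd_SetSiteLongitude(-122.25)
    return cmd.encode()   # ':Sg+122*15#'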
class G2Cmd_SetSiteLatitude(Gemini2Command_LX200):
def __init__(self, lat):
if lat < -90.0 or lat > 90.0:
raise G2CommandParameterValueError('lat must be >= -90.0 and <= 90.0')
sign, self._deg, self._min = ang_to_degmin(lat)
self._signchar = '+' if sign >= 0.0 else '-'
def lx200_str(self): return 'St{:s}{:02d}*{:02d}'.format(self._signchar, self._deg, self._min)
def response(self): return G2Rsp_SetSiteLatitude(self)
class G2Rsp_SetSiteLatitude(Gemini2Response_LX200_FixedLengthOrZero):
def fixed_len(self): return 1
def interpret(self):
if len(self.get_raw()) == 0: raise G2ResponseInterpretationFailure() # invalid
if self.get_raw() != '1': raise G2ResponseInterpretationFailure() # ???
# ...
### Site Selection Commands
# NOTE: the official Gemini 2 serial command documentation is WRONG here:
# the range for sites is 0-4 inclusive, not 0-3 inclusive
class G2Cmd_SetStoredSite(Gemini2Command_LX200_NoReply):
def __init__(self, site):
if site < 0 or site > 4: raise G2CommandParameterValueError('site must be >= 0 and <= 4')
self._site = site
def lx200_str(self): return 'W{:d}'.format(self._site)
# NOTE: the official Gemini 2 serial command documentation is WRONG here:
# the range for sites is 0-4 inclusive, not 0-3 inclusive
class G2Cmd_GetStoredSite(Gemini2Command_LX200):
def lx200_str(self): return 'W?'
def response(self): return G2Rsp_GetStoredSite(self)
class G2Rsp_GetStoredSite(Gemini2Response_LX200_FixedLength):
def fixed_len(self): return 1
def interpret(self): self._site = parse_int_bounds(self.get_raw(), 0, 4)
def get(self): return self._site
# ...
### Native Commands
#class G2Cmd_TEST_Native_92_Get(Gemini2Command_Native_Get):
# def __init__(self, val):
# if not isinstance(val, int):
# raise G2CommandParameterTypeError('int')
# self._val = val
# def native_id(self): return 92
## def native_params(self): return '{:d}'.format(self._val)
# def response(self): return None # TODO!
class G2Cmd_PECBootPlayback_Set(Gemini2Command_Native_Set):
def __init__(self, enable):
if not isinstance(enable, bool):
raise G2CommandParameterTypeError('bool')
self._enable = enable
def native_id(self): return 508
def native_params(self): return '1' if self._enable else '0'
class G2Cmd_PECBootPlayback_Get(Gemini2Command_Native_Get):
def native_id(self): return 508
def response(self): return G2Rsp_PECBootPlayback_Get(self)
class G2Rsp_PECBootPlayback_Get(Gemini2Response_Native):
def interpret(self): self._enabled = parse_int_bounds(self.get_raw(), 0, 1)
def get(self): return (self._enabled != 0)
class G2Cmd_PECStatus_Set(Gemini2Command_Native_Set):
def __init__(self, status):
if not isinstance(status, G2PECStatus):
raise G2CommandParameterTypeError('G2PECStatus')
self._status = status
def native_id(self): return 509
def native_params(self): return str(self._status.value)
class G2Cmd_PECStatus_Get(Gemini2Command_Native_Get):
def native_id(self): return 509
def response(self): return G2Rsp_PECStatus_Get(self)
class G2Rsp_PECStatus_Get(Gemini2Response_Native):
def interpret(self):
self._status = G2PECStatus(int(self.get_raw())) # raises ValueError if the response field value isn't in the enum
def get(self): return self._status
class G2Cmd_PECReplayOn_Set(Gemini2Command_Native_Set):
def native_id(self): return 531
class G2Cmd_PECReplayOff_Set(Gemini2Command_Native_Set):
def native_id(self): return 532
class G2Cmd_NTPServerAddr_Set(Gemini2Command_Native_Set):
def __init__(self, addr):
if not isinstance(addr, ipaddress.IPv4Address):
raise G2CommandParameterTypeError('IPv4Address')
self._addr = addr
def native_id(self): return 816
def native_params(self): return str(self._addr)
class G2Cmd_NTPServerAddr_Get(Gemini2Command_Native_Get):
def native_id(self): return 816
def response(self): return G2Rsp_NTPServerAddr_Get(self)
class G2Rsp_NTPServerAddr_Get(Gemini2Response_Native):
def interpret(self): self._addr = parse_ip4vaddr(self.get_raw())
def get(self): return self._addr
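# Illustrative sketch (not part of the original module): building one of the native SET
# commands defined above. The address 192.168.0.10 is a placeholder, not a suggestion for
# a real NTP server.
def _example_set_ntp_server():
    cmd = G2Cmd_NTPServerAddr_Set(ipaddress.IPv4Address('192.168.0.10'))
    return cmd.encode()   # '>816:192.168.0.10' followed by a one-character checksum and '#'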
# ...
### Undocumented Commands
class G2CmdBase_Divisor_Set(Gemini2Command_Native_Set):
def __init__(self, div):
if not isinstance(div, int):
raise G2CommandParameterTypeError('int')
# clamp divisor into the allowable range
if div < self._div_min(): div = self._div_min()
if div > self._div_max(): div = self._div_max()
self._div = div
def native_params(self): return self._div
def _div_min(self): return SINT32_MIN
def _div_max(self): return SINT32_MAX
class G2Cmd_RA_Divisor_Set(G2CmdBase_Divisor_Set):
def native_id(self): return 451
class G2Cmd_DEC_Divisor_Set(G2CmdBase_Divisor_Set):
def native_id(self): return 452
class G2CmdBase_StartStop_Set(Gemini2Command_Native_Set):
def __init__(self, val):
if not isinstance(val, G2Stopped):
raise G2CommandParameterTypeError('G2Stopped')
self._val = val
def native_params(self): return '{:b}'.format(self._val.value)
class G2Cmd_RA_StartStop_Set(G2CmdBase_StartStop_Set):
def native_id(self): return 453
class G2Cmd_DEC_StartStop_Set(G2CmdBase_StartStop_Set):
def native_id(self): return 454
# TODO: implement GET cmds 451-454
"""
class G2Cmd_Undoc451_Get(Gemini2Command_Native_Get):
def id(self): return 451
def response(self): G2Rsp_Undoc451_Get()
class G2Rsp_Undoc451_Get(Gemini2Response_Native):
# TODO
pass
class G2Cmd_Undoc451_Set(Gemini2Command_Native_Set):
def __init__(self, divisor):
self._divisor = divisor
def id(self): return 451
def param(self): return '{:+d}'.format(self._divisor)
def response(self): G2Rsp_Undoc451_Set()
class G2Rsp_Undoc451_Set(Gemini2Response_Native):
# TODO
pass
"""
# TODO: 452
# TODO: 453
# TODO: 454
"""
HIGH PRIORITY COMMANDS TO IMPLEMENT
===================================
macro 0x05 (ENQ)
set double precision
undocumented native cmds (details omitted here)
LESS IMPORTANT ONES
===================
native 411-412 (do arcsec/sec conversions automagically)
native 21
get meridian side
native 130-137
get RA
get DEC
get AZ
get ALT
all the date+time shit (some of this is under 'Set' category for some idiotic reason)
all the site shit
get info buffer
velocities
park stuff
all move commands + quit-moving commands
native 120-122
native 190-192 (191 = "do not track at all")
native 220-223
native 826
native 65533-65535
BELOW THAT
==========
everything else in category/alphabetical order
"""
| bgottula/point | point/gemini_commands.py | Python | mit | 36,330 |
from flask import jsonify
from app.exceptions import ValidationError
from app.api_1_0 import api
def bad_request(message):
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
def unauthorized(message):
response = jsonify({'error': 'unauthorized', 'message': message})
response.status_code = 401
return response
def forbidden(message):
response = jsonify({'error': 'forbidden', 'message': message})
response.status_code = 403
return response
def not_acceptable(message):
response = jsonify({'error': 'not acceptable', 'message': message})
response.status_code = 406
return response
@api.errorhandler(ValidationError)
def validation_error(e):
return bad_request(e.args[0])
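# Illustrative sketch (not part of the original module): with the errorhandler above, a
# view function can simply raise ValidationError and the client receives a 400 JSON body.
# The 'email' field is an assumption invented for this example.
def _example_require_email(data):
    if not data or 'email' not in data:
        raise ValidationError('email is required')
    return data['email']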
| akahard2dj/GreenLight | app/api_1_0/errors.py | Python | mit | 790 |
# mako/cache.py
# Copyright (C) 2006-2011 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import exceptions
cache = None
class BeakerMissing(object):
def get_cache(self, name, **kwargs):
raise exceptions.RuntimeException("the Beaker package is required to use cache functionality.")
class Cache(object):
"""Represents a data content cache made available to the module
space of a :class:`.Template` object.
:class:`.Cache` is a wrapper on top of a Beaker CacheManager object.
This object in turn references any number of "containers", each of
which defines its own backend (i.e. file, memory, memcached, etc.)
independently of the rest.
"""
def __init__(self, id, starttime):
self.id = id
self.starttime = starttime
self.def_regions = {}
def put(self, key, value, **kwargs):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).put_value(key, starttime=self.starttime, expiretime=expiretime, value=value)
def get(self, key, **kwargs):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
return self._get_cache(defname, **kwargs).get_value(key, starttime=self.starttime, expiretime=expiretime, createfunc=createfunc)
def invalidate(self, key, **kwargs):
"""Invalidate a value in the cache.
:param key: the value's key.
:param \**kwargs: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
defname = kwargs.pop('defname', None)
expiretime = kwargs.pop('expiretime', None)
createfunc = kwargs.pop('createfunc', None)
self._get_cache(defname, **kwargs).remove_value(key, starttime=self.starttime, expiretime=expiretime)
def invalidate_body(self):
"""Invalidate the cached content of the "body" method for this template.
"""
self.invalidate('render_body', defname='render_body')
def invalidate_def(self, name):
"""Invalidate the cached content of a particular <%def> within this template."""
self.invalidate('render_%s' % name, defname='render_%s' % name)
def invalidate_closure(self, name):
"""Invalidate a nested <%def> within this template.
Caching of nested defs is a blunt tool as there is no
management of scope - nested defs that use cache tags
need to have names unique of all other nested defs in the
template, else their content will be overwritten by
each other.
"""
self.invalidate(name, defname=name)
def _get_cache(self, defname, type=None, **kw):
global cache
if not cache:
try:
from beaker import cache as beaker_cache
cache = beaker_cache.CacheManager()
except ImportError:
# keep a fake cache around so subsequent
# calls don't attempt to re-import
cache = BeakerMissing()
if type == 'memcached':
type = 'ext:memcached'
if not type:
(type, kw) = self.def_regions.get(defname, ('memory', {}))
else:
self.def_regions[defname] = (type, kw)
return cache.get_cache(self.id, type=type, **kw)
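# Illustrative usage sketch (not part of the original module): how template-generated code
# might drive this wrapper. It requires the Beaker package at call time; the id, starttime,
# and 'memory' backend type are assumptions chosen for the example, and whether the
# underlying Beaker calls succeed depends on the installed Beaker version.
def _example_cache_usage():
    import time
    template_cache = Cache('example_template', time.time())
    template_cache.put('render_body', '<p>hello</p>', defname='render_body', type='memory')
    body = template_cache.get('render_body', defname='render_body', type='memory')
    template_cache.invalidate_body()   # drop the cached body again
    return body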
| rubencabrera/LazyLibrarian | mako/cache.py | Python | gpl-3.0 | 4,476 |