| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from ceilometer.ipmi.pollsters import node
from ceilometer.tests.ipmi.pollsters import base
CONF = cfg.CONF
CONF.import_opt('host', 'ceilometer.service')
class TestPowerPollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"Current_value": ['13', '00']}
def make_pollster(self):
return node.PowerPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
# only one sample, and value is 19(0x13 as current_value)
self._verify_metering(1, 19, CONF.host)
class TestInletTemperaturePollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"Current_value": ['23', '00']}
def make_pollster(self):
return node.InletTemperaturePollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
# only one sample, and value is 35(0x23 as current_value)
self._verify_metering(1, 35, CONF.host)
class TestOutletTemperaturePollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"Current_value": ['25', '00']}
def make_pollster(self):
return node.OutletTemperaturePollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
# only one sample, and value is 37(0x25 as current_value)
self._verify_metering(1, 37, CONF.host)
class TestAirflowPollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"Current_value": ['be', '00']}
def make_pollster(self):
return node.AirflowPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
# only one sample, and value is 190(0xbe as current_value)
self._verify_metering(1, 190, CONF.host)
class TestCUPSIndexPollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"CUPS_Index": ['2e', '00']}
def make_pollster(self):
return node.CUPSIndexPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
        # only one sample, and value is 46(0x2e as CUPS_Index)
self._verify_metering(1, 46, CONF.host)
class CPUUtilPollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"CPU_Utilization":
['33', '00', '00', '00', '00', '00', '00', '00']}
def make_pollster(self):
return node.CPUUtilPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
        # only one sample, and value is 51(0x33 as CPU_Utilization)
self._verify_metering(1, 51, CONF.host)
class MemUtilPollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"Mem_Utilization":
['05', '00', '00', '00', '00', '00', '00', '00']}
def make_pollster(self):
return node.MemUtilPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
# only one sample, and value is 5(0x05)
self._verify_metering(1, 5, CONF.host)
class IOUtilPollster(base.TestPollsterBase):
def fake_data(self):
# data after parsing Intel Node Manager output
return {"IO_Utilization":
['00', '00', '00', '00', '00', '00', '00', '00']}
def make_pollster(self):
return node.IOUtilPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
# only one sample, and value is 0(0x00)
self._verify_metering(1, 0, CONF.host)
| pkilambi/ceilometer | ceilometer/tests/ipmi/pollsters/test_node.py | Python | apache-2.0 | 4,896 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"alignment.c:327",
"inet_hashtables.h:356",
"mmu.c:602",
"return_address.c:62",
"swab.h:49",
"SemaLambda.cpp:946",
"CGObjCGNU.cpp:1414",
"BugReporter.h:146",
"RegionStore.cpp:1904",
"SymbolManager.cpp:484",
"RewriteObjCFoundationAPI.cpp:737",
"RewriteObjCFoundationAPI.cpp:696",
"CommentParser.cpp:394",
"CommentParser.cpp:391",
"CommentParser.cpp:356",
"LegalizeDAG.cpp:3646",
"IRBuilder.h:844",
"DataLayout.cpp:193",
"transport.c:653",
"xt_socket.c:307",
"xt_socket.c:161",
"inet_hashtables.h:356",
"xc4000.c:1049",
"xc4000.c:1063",
])
# Capture the name of the object file, so we can remove it if a forbidden warning is found.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
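# Illustrative note (not in the original script): the regular expression above is
# meant to pull "file.ext:line" out of a gcc diagnostic. Hypothetical example:
#
#   m = warning_re.match("drivers/foo/bar.c:123:45: warning: unused variable 'x'")
#   m.group(1) == "drivers/foo/"
#   m.group(2) == "bar.c:123"    # this is what is checked against allowed_warnings
#   m.group(3) == "45:"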
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
| htc-msm8960/android_kernel_htc_msm8930 | scripts/gcc-wrapper.py | Python | gpl-2.0 | 3,965 |
import math
import itertools
def prime_list(lower, upper):
primes = []
for n in range(lower, upper, 2):
p = True
for d in range(3, int(math.sqrt(n)) + 1):
if n % d == 0:
p = False
break
if p:
primes.append(n)
return primes
def is_prime(x):
if x % 2 == 0:
return False
d = 3
upper = int(x ** 0.5 + 1)
while d <= upper:
if x % d == 0:
return False
d += 2
return True
print(is_prime(971))
thr_dig = prime_list(101, 1001)
fou_dig = prime_list(1001, 10001)
fiv_dig = prime_list(10001, 100001)
six_dig = prime_list(100001, 1000001)
def str_rotate(s):
rotations = []
for rotation in range(len(s)):
s = s[1:] + s[0]
rotations.append(s)
return rotations
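# Illustrative note (not in the original solution): str_rotate produces every
# rotation of the digit string, ending with the original ordering, e.g.
#
#   str_rotate("197") == ['971', '719', '197']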
def find_circular_primes(p_ls):
circular = True
circ_nums = []
for prime in p_ls:
for check in str_rotate(str(prime)):
if not is_prime(int(check)):
circular = False
break
if circular:
circ_nums.append(prime)
circular = True
return circ_nums
len3_5 = len(find_circular_primes(thr_dig)) + len(find_circular_primes(fou_dig)) + len(find_circular_primes(fiv_dig)) \
+ len(find_circular_primes(six_dig))
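# The hand-added 13 below accounts for the circular primes under 100, which the
# ranges above skip: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97.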
print(len3_5 + 13)
| AustinHartman/randomPrograms | euler35.py | Python | gpl-3.0 | 1,355 |
import six
from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants
from pyrsistent._field_common import (
set_fields, check_type, PFIELD_NO_INITIAL, serialize, check_global_invariants)
from pyrsistent._pmap import PMap, pmap
class _PRecordMeta(type):
def __new__(mcs, name, bases, dct):
set_fields(dct, bases, name='_precord_fields')
store_invariants(dct, bases, '_precord_invariants', '__invariant__')
dct['_precord_mandatory_fields'] = \
set(name for name, field in dct['_precord_fields'].items() if field.mandatory)
dct['_precord_initial_values'] = \
dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL)
dct['__slots__'] = ()
return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct)
@six.add_metaclass(_PRecordMeta)
class PRecord(PMap, CheckedType):
"""
A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting
from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
access using subscript notation.
More documentation and examples of PRecord usage is available at https://github.com/tobgu/pyrsistent
"""
def __new__(cls, **kwargs):
# Hack total! If these two special attributes exist that means we can create
# ourselves. Otherwise we need to go through the Evolver to create the structures
# for us.
if '_precord_size' in kwargs and '_precord_buckets' in kwargs:
return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets'])
initial_values = kwargs
if cls._precord_initial_values:
initial_values = dict(cls._precord_initial_values)
initial_values.update(kwargs)
e = _PRecordEvolver(cls, pmap())
for k, v in initial_values.items():
e[k] = v
return e.persistent()
def set(self, *args, **kwargs):
"""
Set a field in the record. This set function differs slightly from that in the PMap
class. First of all it accepts key-value pairs. Second it accepts multiple key-value
pairs to perform one, atomic, update of multiple fields.
"""
# The PRecord set() can accept kwargs since all fields that have been declared are
# valid python identifiers. Also allow multiple fields to be set in one operation.
if args:
return super(PRecord, self).set(args[0], args[1])
return self.update(kwargs)
def evolver(self):
"""
Returns an evolver of this object.
"""
return _PRecordEvolver(self.__class__, self)
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items()))
@classmethod
def create(cls, kwargs):
"""
Factory method. Will create a new PRecord of the current type and assign the values
specified in kwargs.
"""
if isinstance(kwargs, cls):
return kwargs
return cls(**kwargs)
def __reduce__(self):
# Pickling support
return _restore_pickle, (self.__class__, dict(self),)
def serialize(self, format=None):
"""
Serialize the current PRecord using custom serializer functions for fields where
such have been supplied.
"""
return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items())
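# Illustrative sketch (not part of the original module): a record type would
# typically be declared with pyrsistent's field() descriptor, roughly:
#
#   from pyrsistent import field
#
#   class Point(PRecord):
#       x = field(type=int, mandatory=True)
#       y = field(type=int, initial=0)
#
#   p = Point(x=1)      # y falls back to its initial value, 0
#   p2 = p.set(y=2)     # returns a new Point; p is unchanged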
class _PRecordEvolver(PMap._Evolver):
__slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields')
def __init__(self, cls, *args):
super(_PRecordEvolver, self).__init__(*args)
self._destination_cls = cls
self._invariant_error_codes = []
self._missing_fields = []
def __setitem__(self, key, original_value):
self.set(key, original_value)
def set(self, key, original_value):
field = self._destination_cls._precord_fields.get(key)
if field:
try:
value = field.factory(original_value)
except InvariantException as e:
self._invariant_error_codes += e.invariant_errors
self._missing_fields += e.missing_fields
return self
check_type(self._destination_cls, field, key, value)
is_ok, error_code = field.invariant(value)
if not is_ok:
self._invariant_error_codes.append(error_code)
return super(_PRecordEvolver, self).set(key, value)
else:
raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__))
def persistent(self):
cls = self._destination_cls
is_dirty = self.is_dirty()
pm = super(_PRecordEvolver, self).persistent()
if is_dirty or not isinstance(pm, cls):
result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size)
else:
result = pm
if cls._precord_mandatory_fields:
self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f
in (cls._precord_mandatory_fields - set(result.keys())))
if self._invariant_error_codes or self._missing_fields:
raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields),
'Field invariant failed')
check_global_invariants(result, cls._precord_invariants)
return result
| Futrell/pyrsistent | pyrsistent/_precord.py | Python | mit | 5,764 |
import pickle
import unittest
from test import test_support as support
turtle = support.import_module('turtle')
Vec2D = turtle.Vec2D
test_config = """\
width = 0.75
height = 0.8
canvwidth = 500
canvheight = 200
leftright = 100
topbottom = 100
mode = world
colormode = 255
delay = 100
undobuffersize = 10000
shape = circle
pencolor = red
fillcolor = blue
resizemode = auto
visible = None
language = english
exampleturtle = turtle
examplescreen = screen
title = Python Turtle Graphics
using_IDLE = ''
"""
test_config_two = """\
# Comments!
# Testing comments!
pencolor = red
fillcolor = blue
visible = False
language = english
# Some more
# comments
using_IDLE = False
"""
invalid_test_config = """
pencolor = red
fillcolor: blue
visible = False
"""
class TurtleConfigTest(unittest.TestCase):
def get_cfg_file(self, cfg_str):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'w') as f:
f.write(cfg_str)
return support.TESTFN
def test_config_dict(self):
cfg_name = self.get_cfg_file(test_config)
parsed_cfg = turtle.config_dict(cfg_name)
expected = {
'width' : 0.75,
'height' : 0.8,
'canvwidth' : 500,
'canvheight': 200,
'leftright': 100,
'topbottom': 100,
'mode': 'world',
'colormode': 255,
'delay': 100,
'undobuffersize': 10000,
'shape': 'circle',
'pencolor' : 'red',
'fillcolor' : 'blue',
'resizemode' : 'auto',
'visible' : None,
'language': 'english',
'exampleturtle': 'turtle',
'examplescreen': 'screen',
'title': 'Python Turtle Graphics',
'using_IDLE': '',
}
self.assertEqual(parsed_cfg, expected)
    def test_partial_config_dict_with_comments(self):
cfg_name = self.get_cfg_file(test_config_two)
parsed_cfg = turtle.config_dict(cfg_name)
expected = {
'pencolor': 'red',
'fillcolor': 'blue',
'visible': False,
'language': 'english',
'using_IDLE': False,
}
self.assertEqual(parsed_cfg, expected)
def test_config_dict_invalid(self):
cfg_name = self.get_cfg_file(invalid_test_config)
with support.captured_stdout() as stdout:
parsed_cfg = turtle.config_dict(cfg_name)
err_msg = stdout.getvalue()
self.assertIn('Bad line in config-file ', err_msg)
self.assertIn('fillcolor: blue', err_msg)
self.assertEqual(parsed_cfg, {
'pencolor': 'red',
'visible': False,
})
class VectorComparisonMixin:
def assertVectorsAlmostEqual(self, vec1, vec2):
if len(vec1) != len(vec2):
self.fail("Tuples are not of equal size")
for idx, (i, j) in enumerate(zip(vec1, vec2)):
self.assertAlmostEqual(
i, j, msg='values at index {} do not match'.format(idx))
class TestVec2D(VectorComparisonMixin, unittest.TestCase):
def test_constructor(self):
vec = Vec2D(0.5, 2)
self.assertEqual(vec[0], 0.5)
self.assertEqual(vec[1], 2)
self.assertIsInstance(vec, Vec2D)
self.assertRaises(TypeError, Vec2D)
self.assertRaises(TypeError, Vec2D, 0)
self.assertRaises(TypeError, Vec2D, (0, 1))
self.assertRaises(TypeError, Vec2D, vec)
self.assertRaises(TypeError, Vec2D, 0, 1, 2)
def test_repr(self):
vec = Vec2D(0.567, 1.234)
self.assertEqual(repr(vec), '(0.57,1.23)')
def test_equality(self):
vec1 = Vec2D(0, 1)
vec2 = Vec2D(0.0, 1)
vec3 = Vec2D(42, 1)
self.assertEqual(vec1, vec2)
self.assertEqual(vec1, tuple(vec1))
self.assertEqual(tuple(vec1), vec1)
self.assertNotEqual(vec1, vec3)
self.assertNotEqual(vec2, vec3)
def test_pickling(self):
vec = Vec2D(0.5, 2)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickled = pickle.dumps(vec, protocol=proto)
unpickled = pickle.loads(pickled)
self.assertEqual(unpickled, vec)
self.assertIsInstance(unpickled, Vec2D)
def _assert_arithmetic_cases(self, test_cases, lambda_operator):
for test_case in test_cases:
((first, second), expected) = test_case
op1 = Vec2D(*first)
op2 = Vec2D(*second)
result = lambda_operator(op1, op2)
expected = Vec2D(*expected)
self.assertVectorsAlmostEqual(result, expected)
def test_vector_addition(self):
test_cases = [
(((0, 0), (1, 1)), (1.0, 1.0)),
(((-1, 0), (2, 2)), (1, 2)),
(((1.5, 0), (1, 1)), (2.5, 1)),
]
self._assert_arithmetic_cases(test_cases, lambda x, y: x + y)
def test_vector_subtraction(self):
test_cases = [
(((0, 0), (1, 1)), (-1, -1)),
(((10.625, 0.125), (10, 0)), (0.625, 0.125)),
]
self._assert_arithmetic_cases(test_cases, lambda x, y: x - y)
def test_vector_multiply(self):
vec1 = Vec2D(10, 10)
vec2 = Vec2D(0.5, 3)
answer = vec1 * vec2
expected = 35
self.assertAlmostEqual(answer, expected)
vec = Vec2D(0.5, 3)
answer = vec * 10
expected = Vec2D(5, 30)
self.assertVectorsAlmostEqual(answer, expected)
def test_vector_negative(self):
vec = Vec2D(10, -10)
expected = (-10, 10)
self.assertVectorsAlmostEqual(-vec, expected)
def test_distance(self):
vec = Vec2D(6, 8)
expected = 10
self.assertEqual(abs(vec), expected)
vec = Vec2D(0, 0)
expected = 0
self.assertEqual(abs(vec), expected)
vec = Vec2D(2.5, 6)
expected = 6.5
self.assertEqual(abs(vec), expected)
def test_rotate(self):
cases = [
(((0, 0), 0), (0, 0)),
(((0, 1), 90), (-1, 0)),
(((0, 1), -90), (1, 0)),
(((1, 0), 180), (-1, 0)),
(((1, 0), 360), (1, 0)),
]
for case in cases:
(vec, rot), expected = case
vec = Vec2D(*vec)
got = vec.rotate(rot)
self.assertVectorsAlmostEqual(got, expected)
class TestTNavigator(VectorComparisonMixin, unittest.TestCase):
def setUp(self):
self.nav = turtle.TNavigator()
def test_goto(self):
self.nav.goto(100, -100)
self.assertAlmostEqual(self.nav.xcor(), 100)
self.assertAlmostEqual(self.nav.ycor(), -100)
def test_pos(self):
self.assertEqual(self.nav.pos(), self.nav._position)
self.nav.goto(100, -100)
self.assertEqual(self.nav.pos(), self.nav._position)
def test_left(self):
self.assertEqual(self.nav._orient, (1.0, 0))
self.nav.left(90)
self.assertVectorsAlmostEqual(self.nav._orient, (0.0, 1.0))
def test_right(self):
self.assertEqual(self.nav._orient, (1.0, 0))
self.nav.right(90)
self.assertVectorsAlmostEqual(self.nav._orient, (0, -1.0))
def test_reset(self):
self.nav.goto(100, -100)
self.assertAlmostEqual(self.nav.xcor(), 100)
self.assertAlmostEqual(self.nav.ycor(), -100)
self.nav.reset()
self.assertAlmostEqual(self.nav.xcor(), 0)
self.assertAlmostEqual(self.nav.ycor(), 0)
def test_forward(self):
self.nav.forward(150)
expected = Vec2D(150, 0)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
self.nav.reset()
self.nav.left(90)
self.nav.forward(150)
expected = Vec2D(0, 150)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
self.assertRaises(TypeError, self.nav.forward, 'skldjfldsk')
def test_backwards(self):
self.nav.back(200)
expected = Vec2D(-200, 0)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
self.nav.reset()
self.nav.right(90)
self.nav.back(200)
expected = Vec2D(0, 200)
self.assertVectorsAlmostEqual(self.nav.position(), expected)
def test_distance(self):
self.nav.forward(100)
expected = 100
self.assertAlmostEqual(self.nav.distance(Vec2D(0,0)), expected)
def test_radians_and_degrees(self):
self.nav.left(90)
self.assertAlmostEqual(self.nav.heading(), 90)
self.nav.radians()
self.assertAlmostEqual(self.nav.heading(), 1.57079633)
self.nav.degrees()
self.assertAlmostEqual(self.nav.heading(), 90)
def test_towards(self):
coordinates = [
# coordinates, expected
((100, 0), 0.0),
((100, 100), 45.0),
((0, 100), 90.0),
((-100, 100), 135.0),
((-100, 0), 180.0),
((-100, -100), 225.0),
((0, -100), 270.0),
((100, -100), 315.0),
]
for (x, y), expected in coordinates:
self.assertEqual(self.nav.towards(x, y), expected)
self.assertEqual(self.nav.towards((x, y)), expected)
self.assertEqual(self.nav.towards(Vec2D(x, y)), expected)
def test_heading(self):
self.nav.left(90)
self.assertAlmostEqual(self.nav.heading(), 90)
self.nav.left(45)
self.assertAlmostEqual(self.nav.heading(), 135)
self.nav.right(1.6)
self.assertAlmostEqual(self.nav.heading(), 133.4)
self.assertRaises(TypeError, self.nav.right, 'sdkfjdsf')
self.nav.reset()
rotations = [10, 20, 170, 300]
result = sum(rotations) % 360
for num in rotations:
self.nav.left(num)
self.assertEqual(self.nav.heading(), result)
self.nav.reset()
result = (360-sum(rotations)) % 360
for num in rotations:
self.nav.right(num)
self.assertEqual(self.nav.heading(), result)
self.nav.reset()
rotations = [10, 20, -170, 300, -210, 34.3, -50.2, -10, -29.98, 500]
sum_so_far = 0
for num in rotations:
if num < 0:
self.nav.right(abs(num))
else:
self.nav.left(num)
sum_so_far += num
self.assertAlmostEqual(self.nav.heading(), sum_so_far % 360)
def test_setheading(self):
self.nav.setheading(102.32)
self.assertAlmostEqual(self.nav.heading(), 102.32)
self.nav.setheading(-123.23)
self.assertAlmostEqual(self.nav.heading(), (-123.23) % 360)
self.nav.setheading(-1000.34)
self.assertAlmostEqual(self.nav.heading(), (-1000.34) % 360)
self.nav.setheading(300000)
self.assertAlmostEqual(self.nav.heading(), 300000%360)
def test_positions(self):
self.nav.forward(100)
self.nav.left(90)
self.nav.forward(-200)
self.assertVectorsAlmostEqual(self.nav.pos(), (100.0, -200.0))
def test_setx_and_sety(self):
self.nav.setx(-1023.2334)
self.nav.sety(193323.234)
self.assertVectorsAlmostEqual(self.nav.pos(), (-1023.2334, 193323.234))
def test_home(self):
self.nav.left(30)
self.nav.forward(-100000)
self.nav.home()
self.assertVectorsAlmostEqual(self.nav.pos(), (0,0))
self.assertAlmostEqual(self.nav.heading(), 0)
def test_distance_method(self):
self.assertAlmostEqual(self.nav.distance(30, 40), 50)
vec = Vec2D(0.22, .001)
self.assertAlmostEqual(self.nav.distance(vec), 0.22000227271553355)
another_turtle = turtle.TNavigator()
another_turtle.left(90)
another_turtle.forward(10000)
self.assertAlmostEqual(self.nav.distance(another_turtle), 10000)
class TestTPen(unittest.TestCase):
def test_pendown_and_penup(self):
tpen = turtle.TPen()
self.assertTrue(tpen.isdown())
tpen.penup()
self.assertFalse(tpen.isdown())
tpen.pendown()
self.assertTrue(tpen.isdown())
def test_showturtle_hideturtle_and_isvisible(self):
tpen = turtle.TPen()
self.assertTrue(tpen.isvisible())
tpen.hideturtle()
self.assertFalse(tpen.isvisible())
tpen.showturtle()
self.assertTrue(tpen.isvisible())
def test_main():
support.run_unittest(TurtleConfigTest, TestVec2D, TestTNavigator, TestTPen)
if __name__ == '__main__':
test_main()
| HiSPARC/station-software | user/python/Lib/test/test_turtle.py | Python | gpl-3.0 | 12,577 |
# -*- encoding: utf-8 -*-
# region gplv3preamble
# The Medical Simulation Markup Language (MSML) - Simplifying the biomechanical modeling workflow
#
# MSML has been developed in the framework of 'SFB TRR 125 Cognition-Guided Surgery'
#
# If you use this software in academic work, please cite the paper:
# S. Suwelack, M. Stoll, S. Schalck, N.Schoch, R. Dillmann, R. Bendl, V. Heuveline and S. Speidel,
# The Medical Simulation Markup Language (MSML) - Simplifying the biomechanical modeling workflow,
# Medicine Meets Virtual Reality (MMVR) 2014
#
# Copyright (C) 2013-2014 see Authors.txt
#
# If you have any questions please feel free to contact us at [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# endregion
__authors__ = 'Nicolai Schoch, Alexander Weigl <[email protected]>'
__license__ = 'GPLv3'
import os
import jinja2
from msml.model import *
from msml.exceptions import *
import msml.ext.misc
class MSMLHiFlow3ExporterWarning(MSMLWarning): pass
from .. import log
from path import path
from collections import namedtuple
from ..base import Exporter
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(path(__file__).dirname()))
SCENE_TEMPLATE = jinja_env.get_template("hiflow_scene.tpl.xml")
BCDATA_TEMPLATE = jinja_env.get_template("hiflow_bcdata.tpl.xml")
class BcData(object):
def __init__(self):
self.fc = BcDataEntry() # fixed dirichlet constraint
self.dc = BcDataEntry() # displacement dirichlet constraint
self.fp = BcDataEntry() # force/pressure neumann constraint
class BcDataEntry(object):
"""Holds the data for a Fixed/Displacement (Dirichlet) constraint or Pressure/Force (Neumann) constraint in the bcdata file.
"""
def __init__(self):
self._num = 0
self._points = []
self._vectors = []
self.is_valid()
def is_valid(self):
"""asserts, that the amount of points and vectors are dividable by 3
and correct to the given number of points
:raises: Assertion, if data structure is wrong.
:return: None
"""
div3 = lambda x: len(x) % 3 == 0
assert div3(self._points)
assert div3(self._vectors)
assert self._num * 3 == len(self._points)
assert self._num * 3 == len(self._vectors)
def append(self, count, points, vectors):
"""Appends the given `points` with `vectors` to the constraint.
        * length of points has to be divisible by 3
        * length of vectors has to be divisible by 3
        * if vectors holds just three components it is repeated
          to the correct amount given by `count`
        * each component of points and vectors is cast to float
        :param count: number of points
        :param points: a flat list of point coordinates (3*count == len(points))
        :type points: list
        :param vectors: a flat list of vector components (3*count == len(vectors))
        :type vectors: list
:return: None
"""
as_float = lambda seq: map(float, seq)
points = as_float(points)
vectors = as_float(vectors)
if len(vectors) == 3:
# a single vector is given
vectors = vectors * count
self._num += count
self._points += points
self._vectors += vectors
self.is_valid()
def __repr__(self):
return "%s.%s(%s, %s, %s)" % (
self.__module__, type(self).__name__,
repr(self.num), repr(self._points), repr(self._vectors)
)
def __str__(self):
return "<%s.%s num: %d >" % (self.__module__, type(self).__name__, self._num)
@property
def num(self):
return self._num
@property
def points(self):
return list_to_hf3(self._points)
@property
def vectors(self):
return list_to_hf3(self._vectors)
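# Illustrative sketch (not part of the original exporter): appending two points
# with a single repeated vector; the values below are made up.
#
#   entry = BcDataEntry()
#   entry.append(2, [0, 0, 0, 1, 0, 0], [0, 0, 1])   # vector is repeated per point
#   entry.num == 2
#   # entry.points / entry.vectors serialize through list_to_hf3(), e.g.
#   # "0.000000000000000,0.000000000000000,0.000000000000000;1.000000000000000,..."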
# namedtuple(...) dynamically creates a class -> class constructor.
Entry = namedtuple("Entry", "mesh bcdata")
# Hiflow3-supported features
HIFLOW_FEATURES = frozenset(
['object_element_displacement_supported', 'output_supported', 'object_element_mass_supported',
'scene_objects_supported', 'constraints_supported', 'env_processingunit_CPU_supported',
'material_region_supported', 'env_linearsolver_iterativeCG_supported', 'env_preconditioner_None_supported',
'object_element_linearElasticMaterial_supported', 'sets_elements_supported', 'sets_nodes_supported',
'sets_surface_supported', 'environment_simulation_steps_supported', 'object_element_fixedConstraint_supported',
'env_timeintegration_dynamicImplicitEuler_supported']) # NOTE: ask Alexander: anything from new stuff to be added here?!
class HiFlow3Exporter(Exporter):
"""Exporter for `hiflow3 <http://hiflow3.org>`_
.. todo::
What does this exporter support? - See GitHub issue n73.
"""
def __init__(self, msml_file):
"""
:param msml_file:
:type msml_file: MSMLFile
"""
self.name = 'HiFlow3Exporter'
self.initialize(
msml_file=msml_file,
mesh_sort=('VTU', 'Mesh'),
features=HIFLOW_FEATURES,
)
def render(self):
"""
Builds the File (XML e.g) for the external tool
"""
filename = self._msml_file.filename.namebase
log.info("Converting to HiFlow3 input formats")
log.info(" -- (hiflow3Scene.xml-file & vtkMesh.vtu-file & hiflow3BCdata.xml-file).")
self.create_scenes()
log.info("Hiflow3 Scene Files: %s" % ', '.join(self.scenes))
def execute(self):
"""Execute `runHiFlow3`
"""
import msml.envconfig
import os
try:
os.makedirs("SimResults")
except: pass
for scenefile in self.scenes:
cmd = "%s %s" % (msml.envconfig.HIFLOW_EXECUTABLE, scenefile)
log.info("Executing HiFlow3: %s" % cmd)
os.system(cmd)
def create_scenes(self):
"""
:param hf3xmlfile:
:type hf3xmlfile: file
:return:
"""
self.scenes = list()
for msmlObject in self._msml_file.scene:
assert isinstance(msmlObject, SceneObject)
meshFilename = self.get_value_from_memory(msmlObject.mesh)
hf3_filename = '%s_%s_hf3.xml' % (self._msml_file.filename.namebase, msmlObject.id)
# only take the first
bc_filename = self.create_bcdata_files(msmlObject)[0]
self.scenes.append(hf3_filename)
class HF3MaterialModel(object):
def __init__(self):
self.id, self.lamelambda, self.lamemu, self.gravity, self.density = [None] * 5
hiflow_material_models = []
# get and compute elasticity constants (i.e. material parameters):
# therefore, iterate over "material" and "material's region"
# (compare to: NewSofaExporter.createMaterialRegion().)
for c, matregion in enumerate(msmlObject.material):
hiflow_model = HF3MaterialModel()
hiflow_material_models.append(hiflow_model)
hiflow_model.id = c
assert isinstance(matregion, MaterialRegion)
indices = self.get_value_from_memory(matregion)
# TODO: setup representation of hiflow_model.id for scenarios
# where bounding boxes cannot bound material regions.
# TODO: build example/test inp-file with correct material region id
# (i.e.: hiflow_model.id for every point in indices).
for material in matregion:
if 'linearElasticMaterial' == material.attributes['__tag__']:
E = float(material.attributes["youngModulus"])
NU = float(material.attributes["poissonRatio"])
hiflow_model.lamelambda = (E * NU) / ((1 + NU) * (1 - 2 * NU))
hiflow_model.lamemu = E / (2 * (1 + NU))
hiflow_model.gravity = -9.81
if 'mass' == material.attributes['__tag__']:
hiflow_model.density = material.attributes['massDensity']
maxtimestep = self._msml_file.env.simulation[0].iterations
if maxtimestep > 1:
SolveInstationary = 1
else:
SolveInstationary = 0
#print os.path.abspath(hf3_filename), "!!!!!!"
with open(hf3_filename, 'w') as fp:
content = SCENE_TEMPLATE.render(
hiflow_material_models=hiflow_material_models,
# template arguments
meshfilename=meshFilename,
bcdatafilename=bc_filename,
solverPlatform=self._msml_file.env.solver.processingUnit,
numParaProcCPU=self._msml_file.env.solver.numParallelProcessesOnCPU,
hf3_chanceOfContact=self._msml_file.env.solver.hf3_chanceOfContactBoolean,
SolveInstationary=SolveInstationary,
DeltaT=self._msml_file.env.simulation[0].dt,
maxtimestep=maxtimestep,
linsolver=self._msml_file.env.solver.linearSolver,
precond=self._msml_file.env.solver.preconditioner,
timeIntegrationMethod=self._msml_file.env.solver.timeIntegration,
RayleighRatioMass=self._msml_file.env.solver.dampingRayleighRatioMass,
RayleighRatioStiffness=self._msml_file.env.solver.dampingRayleighRatioStiffness
#
# TODO: include mvGeometryAnalytics-Info (computed in MSML pipeline) here.
# <NeumannBC_upperMVcenterPoint>{84.0, 93.0, 160.0}</NeumannBC_upperMVcenterPoint> <!-- TODO implement this flexibly! -->
# <NeumannBC_avAnnulusRingRadius>23.0</NeumannBC_avAnnulusRingRadius> <!-- TODO implement this flexibly! -->
#
# Note: in future there may be more arguments, such as RefinementLevels, lin/quadElements, ...
# The currently chosen sets of flexible and fixed parameters in HiFlow3Scene.xml-files represent a maximally general optimal setting.
)
fp.write(content)
def create_bcdata_files(self, obj):
"""creates all bcdata files for all declared steps in `msml/env/simulation`
:param obj: scene object
:type obj: msml.model.base.SceneObject
:return:
"""
def create():
for step in self._msml_file.env.simulation:
filename = '%s_%s_%s.bc.xml' % (self._msml_file.filename.namebase, obj.id, step.name)
data = self.create_bcdata(obj, step.name)
content = BCDATA_TEMPLATE.render(data = data)
with open(filename, 'w') as h:
h.write(content)
yield filename
return list(create())
def create_bcdata(self, obj, step):
"""
:param obj:
:type obj: msml.model.base.SceneObject
:type step: msml.model.base.MSMLEnvironment.Simulation.Step
:return: a object of BcData
:rtype: BcData
"""
bcdata = BcData()
# find the constraints for the given step
for cs in obj.constraints:
if cs.for_step == step or cs.for_step == "${%s}" % step:
break
else:
cs = None
if cs is None: # nothing to do here
log.warn("No constraint region found for step %s" % step)
return bcdata
mesh_name = self.get_value_from_memory(obj.mesh)
for constraint in cs.constraints:
indices = self.get_value_from_memory(constraint, "indices")
points = msml.ext.misc.PositionFromIndices(mesh_name, tuple((map(int, indices))), 'points')
count = len(points) / 3
points_str = list_to_hf3(points) # TODO: adapt this for non-box-able indices/vertices/facets/cells.
if constraint.tag == "fixedConstraint":
bcdata.fc.append(count, points, [0, 0, 0])
elif constraint.tag == "displacementConstraint":
disp_vector = constraint.displacement.split(" ")
bcdata.dc.append(count, points, disp_vector)
elif constraint.tag == "surfacePressure": # TODO?! - would this need to be adapted?!
force_vector = constraint.pressure.split(" ")
bcdata.fp.append(count, points, force_vector)
return bcdata
def count_vector(vec, count):
assert len(vec) == 3
vec = map(lambda x: "%0.15f" % float(x), vec)
return ";".join(count * [",".join(vec)])
def list_to_hf3(seq):
"""transfers a seq of values into a string for hiflow3.
:param seq: a sequence (iterable) of value (int, float, ...)
:rtype: str
>>> points = map(float, [1,2,3]*3)
>>> list_to_hf3(points)
"1.0,2.0,3.0;1.0,2.0,3.0;1.0,2.0,3.0"
"""
from cStringIO import StringIO
s = StringIO()
for i, p in enumerate(seq, 1):
s.write("%0.15f" % float(p))
if i % 3 == 0 and i != 1:
s.write(";")
else:
s.write(",")
s = s.getvalue()[:-1]
assert s.count(';') + 1 == len(seq) / 3
return s
| CognitionGuidedSurgery/msml | src/msml/exporter/hiflow3/__init__.py | Python | gpl-3.0 | 13,923 |
# Copyright (C) 2014 Peter Feiner
import sys
def safe_import(name):
if 'eventlet' in sys.modules:
import eventlet.patcher
return eventlet.patcher.original(name)
else:
return __import__(name)
fcntl = safe_import('fcntl')
os = safe_import('os')
select = safe_import('select')
threading = safe_import('threading')
time = safe_import('time')
signal = safe_import('signal')
import contextlib
import gc
import inspect
import cStringIO
from . import io
class State(object):
def __init__(self):
self.reset()
def reset(self):
self.thread = None
self.pipe = None
self.options = None
self.sampling = False
state = State()
def set_cloexec(fd):
fcntl.fcntl(fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
def safe_write(fd, buf):
r = 0
while r < len(buf):
        r += os.write(fd, buf[r:])
@contextlib.contextmanager
def flock(fd, op=fcntl.LOCK_EX):
fcntl.flock(fd, op)
try:
yield
finally:
fcntl.flock(fd, fcntl.LOCK_UN)
def reopen(fd, mode):
new_fd = os.open('/proc/self/fd/%d' % fd, mode)
os.close(fd)
return new_fd
def event_header(now, pid, tid, event):
return '%f\0%d\0%d\0%s\0\n' % (now, pid, tid, event)
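# Illustrative note (not in the original module): records are NUL-delimited and
# newline-terminated, e.g. event_header(1.5, 10, 20, 'S') would yield
# '1.500000\x0010\x0020\x00S\x00\n' ('S' is a placeholder; the real event names
# come from the io module).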
def write_start_stop_event(event):
with flock(state.options.out_fd):
safe_write(state.options.out_fd,
event_header(time.time(), os.getpid(), 0, event))
def write_stop():
write_start_stop_event(io.STOP_EVENT)
def write_start():
write_start_stop_event(io.START_EVENT)
def write_sample(now, pid, tid, frame):
buf = cStringIO.StringIO()
buf.write(event_header(now, pid, tid, io.SAMPLE_EVENT))
while frame is not None:
if frame.f_code == state.options.ignore:
break
buf.write('%s\0%d\0%s\0%d\0\n' %
(os.path.abspath(frame.f_code.co_filename),
frame.f_lineno,
frame.f_code.co_name,
frame.f_code.co_firstlineno))
frame = frame.f_back
buf.write('\n')
with flock(state.options.out_fd):
safe_write(state.options.out_fd, buf.getvalue())
orig_greenlet = None
all_greenlets = None
greenlet_lock = threading.Lock()
def hijack_greenlet():
global orig_greenlet
global all_greenlets
import greenlet
try:
if orig_greenlet is None:
orig_greenlet = greenlet.greenlet
all_greenlets = set()
class Greenlet(orig_greenlet):
def __init__(self, *args, **kwargs):
orig_greenlet.__init__(self, *args, **kwargs)
with greenlet_lock:
all_greenlets.add(self)
# Grab all of the greenlet objects created before we hijacked.
with greenlet_lock:
for o in gc.get_objects():
if isinstance(o, orig_greenlet):
all_greenlets.add(o)
orig_greenlet = greenlet.greenlet
greenlet.greenlet = Greenlet
except:
orig_greenlet = None
raise
def frames():
for tid, frame in sys._current_frames().iteritems():
yield tid, frame
if state.options.sample_greenlets:
with greenlet_lock:
all_greenlets_copy = set(all_greenlets)
dead_greenlets = set()
try:
for gt in all_greenlets:
if gt.dead:
dead_greenlets.add(gt)
elif gt.gr_frame is not None:
yield id(gt), gt.gr_frame
finally:
with greenlet_lock:
all_greenlets.difference_update(dead_greenlets)
def collect_sample():
now = time.time()
pid = os.getpid()
current_tid = threading.current_thread().ident
for tid, frame in frames():
if tid != current_tid:
write_sample(now, pid, tid, frame)
def main_loop():
period = float(1) / state.options.frequency
last_sample_time = time.time() - period
if state.sampling:
write_start()
while True:
if state.sampling:
timeout = period
else:
timeout = None
ready = select.select([state.pipe[0]], [], [], timeout)
if ready[0]:
msg = os.read(state.pipe[0], 1)
if msg in (START_MSG, TOGGLE_MSG) and not state.sampling:
write_start()
state.sampling = True
elif msg in (STOP_MSG, TOGGLE_MSG) and state.sampling:
write_stop()
state.sampling = False
else:
raise Exception('Unknown message %r' % msg)
if not state.sampling:
continue
time_since_last_sample = time.time() - last_sample_time
if time_since_last_sample >= period:
collect_sample()
last_sample_time = time.time()
timeout = period
else:
timeout = period - time_since_last_sample
orig_os_fork = os.fork
def fork():
r, w = os.pipe()
pid = orig_os_fork()
if pid == 0:
os.close(r)
if threading.current_thread() == state.thread:
# Forking from our own thread. We could handle this enough to let an
# exec() happen before returning to the sampling loop.
raise NotImplementedError('fork() in sampling loop')
else:
if state.options.follow_fork and state.thread is not None:
options = state.options
options.autostart = state.sampling
options.out_fd = reopen(options.out_fd,
os.O_WRONLY | os.O_APPEND)
os.close(state.pipe[0])
os.close(state.pipe[1])
state.reset()
setup(options)
os.write(w, 'a')
os.close(w)
else:
os.close(w)
os.read(r, 1)
os.close(r)
return pid
def setup(options):
if state.thread is not None:
raise Exception('Profiling already started')
if options.sample_greenlets:
hijack_greenlet()
os.fork = fork
state.options = options
state.pipe = os.pipe()
set_cloexec(state.pipe[0])
set_cloexec(state.pipe[1])
state.sampling = options.autostart
def setup_handler(signo, handler):
if signo is not None:
x = signal.signal(signo, lambda signo, frame: handler())
if options.start_signal == options.stop_signal:
setup_handler(options.start_signal, toggle)
else:
setup_handler(options.start_signal, start)
setup_handler(options.stop_signal, stop)
# Start thread after signal handlers are setup so tests can safely send
# signals as soon as the first event is emitted.
state.thread = threading.Thread(target=main_loop, name='wcp')
state.thread.daemon = True
state.thread.start()
START_MSG = 's'
STOP_MSG = 'S'
TOGGLE_MSG = 't'
DETACH_MSG = 'd'
def start():
os.write(state.pipe[1], START_MSG)
def stop():
os.write(state.pipe[1], STOP_MSG)
def toggle():
os.write(state.pipe[1], TOGGLE_MSG)
| peterfeiner/wcp | wcp/record_impl.py | Python | gpl-2.0 | 7,068 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from mock import DEFAULT
from mock import MagicMock
from mock import patch
from trove.backup import models
from trove.backup import state
from trove.common import context
from trove.common import exception
from trove.common import utils
from trove.instance import models as instance_models
from trove.taskmanager import api
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
def _prep_conf(current_time):
current_time = str(current_time)
_context = context.TroveContext(tenant='TENANT-' + current_time)
instance_id = 'INSTANCE-' + current_time
return _context, instance_id
BACKUP_NAME = 'WORKS'
BACKUP_NAME_2 = 'IT-WORKS'
BACKUP_NAME_3 = 'SECOND-LAST-ONE'
BACKUP_NAME_4 = 'LAST-ONE-FULL'
BACKUP_NAME_5 = 'LAST-ONE-INCREMENTAL'
BACKUP_NAME_6 = 'LAST-ONE-DELETED'
BACKUP_STATE = state.BackupState.NEW
BACKUP_STATE_COMPLETED = state.BackupState.COMPLETED
BACKUP_DESC = 'Backup test'
BACKUP_FILENAME = '45a3d8cb-ade8-484c-a8a5-0c3c7286fb2f.xbstream.gz'
BACKUP_LOCATION = 'https://hpcs.com/tenant/database_backups/' + BACKUP_FILENAME
class BackupCreateTest(trove_testtools.TestCase):
def setUp(self):
super(BackupCreateTest, self).setUp()
util.init_db()
self.context, self.instance_id = _prep_conf(utils.utcnow())
self.created = False
def tearDown(self):
super(BackupCreateTest, self).tearDown()
if self.created:
models.DBBackup.find_by(
tenant_id=self.context.tenant).delete()
@patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
def test_create(self):
instance = MagicMock()
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.datastore_version = MagicMock()
instance.datastore_version.id = 'datastore-id-999'
instance.cluster_id = None
with patch.multiple(models.Backup,
validate_can_perform_action=DEFAULT,
verify_swift_auth_token=DEFAULT):
with patch.object(api.API, 'create_backup',
MagicMock(return_value=None)):
bu = models.Backup.create(self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
self.created = True
self.assertEqual(BACKUP_NAME, bu.name)
self.assertEqual(BACKUP_DESC, bu.description)
self.assertEqual(self.instance_id, bu.instance_id)
self.assertEqual(state.BackupState.NEW, bu.state)
db_record = models.DBBackup.find_by(id=bu.id)
self.assertEqual(bu.id, db_record['id'])
self.assertEqual(BACKUP_NAME, db_record['name'])
self.assertEqual(BACKUP_DESC, db_record['description'])
self.assertEqual(self.instance_id,
db_record['instance_id'])
self.assertEqual(state.BackupState.NEW,
db_record['state'])
self.assertEqual(instance.datastore_version.id,
db_record['datastore_version_id'])
@patch.object(api.API, 'get_client', MagicMock(return_value=MagicMock()))
def test_create_incremental(self):
instance = MagicMock()
parent = MagicMock(spec=models.DBBackup)
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.datastore_version = MagicMock()
instance.datastore_version.id = 'datastore-id-999'
instance.cluster_id = None
with patch.multiple(models.Backup,
validate_can_perform_action=DEFAULT,
verify_swift_auth_token=DEFAULT,
get_by_id=MagicMock(return_value=parent)):
with patch.object(api.API, 'create_backup',
MagicMock(return_value=None)):
incremental = models.Backup.create(
self.context,
self.instance_id,
BACKUP_NAME,
BACKUP_DESC,
parent_id='parent_uuid')
self.created = True
db_record = models.DBBackup.find_by(id=incremental.id)
self.assertEqual(incremental.id,
db_record['id'])
self.assertEqual(BACKUP_NAME,
db_record['name'])
self.assertEqual(BACKUP_DESC,
db_record['description'])
self.assertEqual(self.instance_id,
db_record['instance_id'])
self.assertEqual(state.BackupState.NEW,
db_record['state'])
self.assertEqual('parent_uuid',
db_record['parent_id'])
self.assertEqual(instance.datastore_version.id,
db_record['datastore_version_id'])
def test_create_instance_not_found(self):
self.assertRaises(exception.NotFound, models.Backup.create,
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
def test_create_incremental_not_found(self):
instance = MagicMock()
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
instance.cluster_id = None
with patch.object(models.Backup, 'validate_can_perform_action',
return_value=None):
with patch.object(models.Backup, 'verify_swift_auth_token',
return_value=None):
self.assertRaises(exception.NotFound, models.Backup.create,
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC,
parent_id='BAD')
def test_create_instance_not_active(self):
instance = MagicMock()
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
side_effect=exception.UnprocessableEntity)
self.assertRaises(exception.UnprocessableEntity,
models.Backup.create,
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
def test_create_backup_swift_token_invalid(self):
instance = MagicMock()
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
with patch.object(models.Backup, 'validate_can_perform_action',
return_value=None):
with patch.object(models.Backup, 'verify_swift_auth_token',
side_effect=exception.SwiftAuthError):
self.assertRaises(exception.SwiftAuthError,
models.Backup.create,
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
def test_create_backup_datastore_operation_not_supported(self):
instance = MagicMock()
with patch.object(instance_models.BuiltInstance, 'load',
return_value=instance):
instance.validate_can_perform_action = MagicMock(
return_value=None)
with patch.object(
models.Backup, 'validate_can_perform_action',
side_effect=exception.DatastoreOperationNotSupported
):
self.assertRaises(exception.DatastoreOperationNotSupported,
models.Backup.create,
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
class BackupDeleteTest(trove_testtools.TestCase):
def setUp(self):
super(BackupDeleteTest, self).setUp()
util.init_db()
self.context, self.instance_id = _prep_conf(utils.utcnow())
def tearDown(self):
super(BackupDeleteTest, self).tearDown()
def test_delete_backup_not_found(self):
self.assertRaises(exception.NotFound, models.Backup.delete,
self.context, 'backup-id')
def test_delete_backup_is_running(self):
backup = MagicMock()
backup.is_running = True
with patch.object(models.Backup, 'get_by_id', return_value=backup):
self.assertRaises(exception.UnprocessableEntity,
models.Backup.delete, self.context, 'backup_id')
def test_delete_backup_swift_token_invalid(self):
backup = MagicMock()
backup.is_running = False
with patch.object(models.Backup, 'get_by_id', return_value=backup):
with patch.object(models.Backup, 'verify_swift_auth_token',
side_effect=exception.SwiftAuthError):
self.assertRaises(exception.SwiftAuthError,
models.Backup.delete,
self.context, 'backup_id')
class BackupORMTest(trove_testtools.TestCase):
def setUp(self):
super(BackupORMTest, self).setUp()
util.init_db()
self.context, self.instance_id = _prep_conf(utils.utcnow())
self.backup = models.DBBackup.create(tenant_id=self.context.tenant,
name=BACKUP_NAME,
state=BACKUP_STATE,
instance_id=self.instance_id,
deleted=False,
size=2.0,
location=BACKUP_LOCATION)
self.deleted = False
def tearDown(self):
super(BackupORMTest, self).tearDown()
if not self.deleted:
models.DBBackup.find_by(tenant_id=self.context.tenant).delete()
def test_list(self):
backups, marker = models.Backup.list(self.context)
self.assertIsNone(marker)
self.assertEqual(1, len(backups))
def test_list_for_instance(self):
models.DBBackup.create(tenant_id=self.context.tenant,
name=BACKUP_NAME_2,
state=BACKUP_STATE,
instance_id=self.instance_id,
size=2.0,
deleted=False)
backups, marker = models.Backup.list_for_instance(self.context,
self.instance_id)
self.assertIsNone(marker)
self.assertEqual(2, len(backups))
def test_get_last_completed(self):
models.DBBackup.create(tenant_id=self.context.tenant,
name=BACKUP_NAME_3,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
size=2.0,
deleted=False)
models.DBBackup.create(tenant_id=self.context.tenant,
name=BACKUP_NAME_4,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
size=2.0,
deleted=False)
models.DBBackup.create(tenant_id=self.context.tenant,
name=BACKUP_NAME_5,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
parent_id='parent_uuid',
size=2.0,
deleted=False)
models.DBBackup.create(tenant_id=self.context.tenant,
name=BACKUP_NAME_6,
state=BACKUP_STATE_COMPLETED,
instance_id=self.instance_id,
size=2.0,
deleted=True)
backup = models.Backup.get_last_completed(
self.context, self.instance_id, include_incremental=True)
self.assertEqual(BACKUP_NAME_5, backup.name)
backup = models.Backup.get_last_completed(
self.context, self.instance_id, include_incremental=False)
self.assertEqual(BACKUP_NAME_4, backup.name)
def test_running(self):
running = models.Backup.running(instance_id=self.instance_id)
self.assertTrue(running)
def test_not_running(self):
not_running = models.Backup.running(instance_id='non-existent')
self.assertFalse(not_running)
def test_running_exclude(self):
not_running = models.Backup.running(instance_id=self.instance_id,
exclude=self.backup.id)
self.assertFalse(not_running)
def test_is_running(self):
self.assertTrue(self.backup.is_running)
def test_is_done(self):
self.backup.state = state.BackupState.COMPLETED
self.backup.save()
self.assertTrue(self.backup.is_done)
def test_not_is_running(self):
self.backup.state = state.BackupState.COMPLETED
self.backup.save()
self.assertFalse(self.backup.is_running)
def test_not_is_done(self):
self.assertFalse(self.backup.is_done)
def test_backup_size(self):
db_record = models.DBBackup.find_by(id=self.backup.id)
self.assertEqual(self.backup.size, db_record.size)
def test_backup_delete(self):
backup = models.DBBackup.find_by(id=self.backup.id)
backup.delete()
backups, marker = models.Backup.list_for_instance(self.context,
self.instance_id)
self.assertIsNone(marker)
self.assertEqual(0, len(backups))
def test_delete(self):
self.backup.delete()
db_record = models.DBBackup.find_by(id=self.backup.id, deleted=True)
self.assertEqual(self.instance_id, db_record['instance_id'])
def test_deleted_not_running(self):
self.backup.delete()
self.assertFalse(models.Backup.running(self.instance_id))
def test_filename(self):
self.assertEqual(BACKUP_FILENAME, self.backup.filename)
class PaginationTests(trove_testtools.TestCase):
def setUp(self):
super(PaginationTests, self).setUp()
util.init_db()
self.context, self.instance_id = _prep_conf(utils.utcnow())
# Create a bunch of backups
bkup_info = {
'tenant_id': self.context.tenant,
'state': BACKUP_STATE,
'instance_id': self.instance_id,
'size': 2.0,
'deleted': False
}
for backup in xrange(50):
bkup_info.update({'name': 'Backup-%s' % backup})
models.DBBackup.create(**bkup_info)
def tearDown(self):
super(PaginationTests, self).tearDown()
query = models.DBBackup.query()
query.filter_by(instance_id=self.instance_id).delete()
def test_pagination_list(self):
# page one
backups, marker = models.Backup.list(self.context)
self.assertEqual(20, marker)
self.assertEqual(20, len(backups))
# page two
self.context.marker = 20
backups, marker = models.Backup.list(self.context)
self.assertEqual(40, marker)
self.assertEqual(20, len(backups))
# page three
self.context.marker = 40
backups, marker = models.Backup.list(self.context)
self.assertIsNone(marker)
self.assertEqual(10, len(backups))
def test_pagination_list_for_instance(self):
# page one
backups, marker = models.Backup.list_for_instance(self.context,
self.instance_id)
self.assertEqual(20, marker)
self.assertEqual(20, len(backups))
# page two
self.context.marker = 20
backups, marker = models.Backup.list(self.context)
self.assertEqual(40, marker)
self.assertEqual(20, len(backups))
# page three
self.context.marker = 40
backups, marker = models.Backup.list_for_instance(self.context,
self.instance_id)
self.assertIsNone(marker)
self.assertEqual(10, len(backups))
class OrderingTests(trove_testtools.TestCase):
def setUp(self):
super(OrderingTests, self).setUp()
util.init_db()
now = utils.utcnow()
self.context, self.instance_id = _prep_conf(now)
info = {
'tenant_id': self.context.tenant,
'state': BACKUP_STATE,
'instance_id': self.instance_id,
'size': 2.0,
'deleted': False
}
four = now - datetime.timedelta(days=4)
one = now - datetime.timedelta(days=1)
three = now - datetime.timedelta(days=3)
two = now - datetime.timedelta(days=2)
# Create backups out of order, save/create set the 'updated' field,
# so we need to use the db_api directly.
models.DBBackup().db_api.save(
models.DBBackup(name='four', updated=four,
id=utils.generate_uuid(), **info))
models.DBBackup().db_api.save(
models.DBBackup(name='one', updated=one,
id=utils.generate_uuid(), **info))
models.DBBackup().db_api.save(
models.DBBackup(name='three', updated=three,
id=utils.generate_uuid(), **info))
models.DBBackup().db_api.save(
models.DBBackup(name='two', updated=two,
id=utils.generate_uuid(), **info))
def tearDown(self):
super(OrderingTests, self).tearDown()
query = models.DBBackup.query()
query.filter_by(instance_id=self.instance_id).delete()
def test_list(self):
backups, marker = models.Backup.list(self.context)
self.assertIsNone(marker)
actual = [b.name for b in backups]
expected = [u'one', u'two', u'three', u'four']
self.assertEqual(expected, actual)
def test_list_for_instance(self):
backups, marker = models.Backup.list_for_instance(self.context,
self.instance_id)
self.assertIsNone(marker)
actual = [b.name for b in backups]
expected = [u'one', u'two', u'three', u'four']
self.assertEqual(expected, actual)
| fabian4/trove | trove/tests/unittests/backup/test_backup_models.py | Python | apache-2.0 | 20,319 |
"""Helpers for listening to events."""
import functools as ft
from datetime import timedelta
from ..const import (
ATTR_NOW, EVENT_STATE_CHANGED, EVENT_TIME_CHANGED, MATCH_ALL)
from ..util import dt as dt_util
def track_state_change(hass, entity_ids, action, from_state=None,
to_state=None):
"""Track specific state changes.
entity_ids, from_state and to_state can be string or list.
Use list to match multiple.
Returns the listener that listens on the bus for EVENT_STATE_CHANGED.
Pass the return value into hass.bus.remove_listener to remove it.
"""
from_state = _process_match_param(from_state)
to_state = _process_match_param(to_state)
# Ensure it is a lowercase list with entity ids we want to match on
if isinstance(entity_ids, str):
entity_ids = (entity_ids.lower(),)
else:
entity_ids = tuple(entity_id.lower() for entity_id in entity_ids)
@ft.wraps(action)
def state_change_listener(event):
"""The listener that listens for specific state changes."""
if event.data['entity_id'] not in entity_ids:
return
if event.data['old_state'] is None:
old_state = None
else:
old_state = event.data['old_state'].state
if event.data['new_state'] is None:
new_state = None
else:
new_state = event.data['new_state'].state
if _matcher(old_state, from_state) and _matcher(new_state, to_state):
action(event.data['entity_id'],
event.data['old_state'],
event.data['new_state'])
hass.bus.listen(EVENT_STATE_CHANGED, state_change_listener)
return state_change_listener
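# Illustrative usage sketch (not part of the original module): assumes a
# configured `hass` instance; the entity id "light.kitchen" is hypothetical.
#
#   def notify(entity_id, old_state, new_state):
#       print(entity_id, "changed to", new_state.state)
#
#   track_state_change(hass, "light.kitchen", notify,
#                      from_state="off", to_state="on")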
def track_point_in_time(hass, action, point_in_time):
"""Add a listener that fires once after a spefic point in time."""
utc_point_in_time = dt_util.as_utc(point_in_time)
@ft.wraps(action)
def utc_converter(utc_now):
"""Convert passed in UTC now to local now."""
action(dt_util.as_local(utc_now))
return track_point_in_utc_time(hass, utc_converter, utc_point_in_time)
def track_point_in_utc_time(hass, action, point_in_time):
"""Add a listener that fires once after a specific point in UTC time."""
# Ensure point_in_time is UTC
point_in_time = dt_util.as_utc(point_in_time)
@ft.wraps(action)
def point_in_time_listener(event):
"""Listen for matching time_changed events."""
now = event.data[ATTR_NOW]
if now >= point_in_time and \
not hasattr(point_in_time_listener, 'run'):
# Set variable so that we will never run twice.
# Because the event bus might have to wait till a thread comes
# available to execute this listener it might occur that the
# listener gets lined up twice to be executed. This will make
# sure the second time it does nothing.
point_in_time_listener.run = True
hass.bus.remove_listener(EVENT_TIME_CHANGED,
point_in_time_listener)
action(now)
hass.bus.listen(EVENT_TIME_CHANGED, point_in_time_listener)
return point_in_time_listener
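# Illustrative usage sketch (not part of the original module): fire a callback
# once, five minutes from now; `hass` is assumed to be a configured instance.
#
#   def turn_off(now):
#       print("fired at", now)
#
#   track_point_in_utc_time(hass, turn_off,
#                           dt_util.utcnow() + timedelta(minutes=5))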
def track_sunrise(hass, action, offset=None):
"""Add a listener that will fire a specified offset from sunrise daily."""
from homeassistant.components import sun
offset = offset or timedelta()
def next_rise():
"""Return the next sunrise."""
next_time = sun.next_rising_utc(hass) + offset
while next_time < dt_util.utcnow():
next_time = next_time + timedelta(days=1)
return next_time
def sunrise_automation_listener(now):
"""Called when it's time for action."""
track_point_in_utc_time(hass, sunrise_automation_listener, next_rise())
action()
track_point_in_utc_time(hass, sunrise_automation_listener, next_rise())
def track_sunset(hass, action, offset=None):
"""Add a listener that will fire a specified offset from sunset daily."""
from homeassistant.components import sun
offset = offset or timedelta()
def next_set():
"""Return next sunrise."""
next_time = sun.next_setting_utc(hass) + offset
while next_time < dt_util.utcnow():
next_time = next_time + timedelta(days=1)
return next_time
def sunset_automation_listener(now):
"""Called when it's time for action."""
track_point_in_utc_time(hass, sunset_automation_listener, next_set())
action()
track_point_in_utc_time(hass, sunset_automation_listener, next_set())
# pylint: disable=too-many-arguments
def track_utc_time_change(hass, action, year=None, month=None, day=None,
hour=None, minute=None, second=None, local=False):
"""Add a listener that will fire if time matches a pattern."""
# We do not have to wrap the function with time pattern matching logic
# if no pattern given
if all(val is None for val in (year, month, day, hour, minute, second)):
@ft.wraps(action)
def time_change_listener(event):
"""Fire every time event that comes in."""
action(event.data[ATTR_NOW])
hass.bus.listen(EVENT_TIME_CHANGED, time_change_listener)
return time_change_listener
pmp = _process_match_param
year, month, day = pmp(year), pmp(month), pmp(day)
hour, minute, second = pmp(hour), pmp(minute), pmp(second)
@ft.wraps(action)
def pattern_time_change_listener(event):
"""Listen for matching time_changed events."""
now = event.data[ATTR_NOW]
if local:
now = dt_util.as_local(now)
mat = _matcher
# pylint: disable=too-many-boolean-expressions
if mat(now.year, year) and \
mat(now.month, month) and \
mat(now.day, day) and \
mat(now.hour, hour) and \
mat(now.minute, minute) and \
mat(now.second, second):
action(now)
hass.bus.listen(EVENT_TIME_CHANGED, pattern_time_change_listener)
return pattern_time_change_listener
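# Illustrative usage sketches (not part of the original module), assuming a
# configured `hass` instance and a one-argument callback `on_time(now)`:
#
#   # fire at minute 0, second 0 of every hour (UTC)
#   track_utc_time_change(hass, on_time, minute=0, second=0)
#
#   # fire every 30 seconds, using the '/' divisor syntax handled below
#   track_utc_time_change(hass, on_time, second='/30')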
# pylint: disable=too-many-arguments
def track_time_change(hass, action, year=None, month=None, day=None,
hour=None, minute=None, second=None):
"""Add a listener that will fire if UTC time matches a pattern."""
track_utc_time_change(hass, action, year, month, day, hour, minute, second,
local=True)
def _process_match_param(parameter):
"""Wrap parameter in a tuple if it is not one and returns it."""
if parameter is None or parameter == MATCH_ALL:
return MATCH_ALL
elif isinstance(parameter, str) and parameter.startswith('/'):
return parameter
elif isinstance(parameter, str) or not hasattr(parameter, '__iter__'):
return (parameter,)
else:
return tuple(parameter)
def _matcher(subject, pattern):
"""Return True if subject matches the pattern.
Pattern is either a tuple of allowed subjects or a `MATCH_ALL`.
"""
if isinstance(pattern, str) and pattern.startswith('/'):
try:
return subject % float(pattern.lstrip('/')) == 0
except ValueError:
return False
return MATCH_ALL == pattern or subject in pattern
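# Illustrative behaviour of the two helpers above (hypothetical values):
#   _process_match_param(None)  -> MATCH_ALL, so _matcher(x, MATCH_ALL) is True
#   _process_match_param(5)     -> (5,),      so _matcher(5, (5,)) is True
#   _process_match_param('/15') -> '/15',     so _matcher(30, '/15') is True
#                                             because 30 % 15 == 0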
| justyns/home-assistant | homeassistant/helpers/event.py | Python | mit | 7,340 |
from __future__ import absolute_import
from icons.literals import FIND, FIND_ADD, FIND_ARROW_LEFT
from icons import Icon
icon_search = Icon(FIND)
icon_advanced_search = Icon(FIND_ADD)
icon_search_again = Icon(FIND_ARROW_LEFT)
| smendez/lean | paart/apps/dynamic_search/icons.py | Python | gpl-3.0 | 228 |
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
from ansible.module_utils.facts.sysctl import get_sysctl
class DarwinHardware(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def populate(self, collected_facts=None):
hardware_facts = {}
self.sysctl = get_sysctl(self.module, ['hw', 'machdep', 'kern'])
mac_facts = self.get_mac_facts()
cpu_facts = self.get_cpu_facts()
memory_facts = self.get_memory_facts()
hardware_facts.update(mac_facts)
hardware_facts.update(cpu_facts)
hardware_facts.update(memory_facts)
return hardware_facts
def get_system_profile(self):
rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
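    # Hypothetical example of the mapping returned above (values vary by host):
    #   {'Model Name': 'MacBook Pro', 'Processor Name': 'Intel Core i7',
    #    'Processor Speed': '2.6 GHz', 'Memory': '16 GB', ...}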
def get_mac_facts(self):
mac_facts = {}
rc, out, err = self.module.run_command("sysctl hw.model")
if rc == 0:
mac_facts['model'] = out.splitlines()[-1].split()[1]
mac_facts['osversion'] = self.sysctl['kern.osversion']
mac_facts['osrevision'] = self.sysctl['kern.osrevision']
return mac_facts
def get_cpu_facts(self):
cpu_facts = {}
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
cpu_facts['processor'] = self.sysctl['machdep.cpu.brand_string']
cpu_facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
cpu_facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
cpu_facts['processor_cores'] = self.sysctl['hw.physicalcpu']
cpu_facts['processor_vcpus'] = self.sysctl.get('hw.logicalcpu') or self.sysctl.get('hw.ncpu') or ''
return cpu_facts
def get_memory_facts(self):
memory_facts = {}
memory_facts['memtotal_mb'] = int(self.sysctl['hw.memsize']) // 1024 // 1024
rc, out, err = self.module.run_command("sysctl hw.usermem")
if rc == 0:
memory_facts['memfree_mb'] = int(out.splitlines()[-1].split()[1]) // 1024 // 1024
return memory_facts
class DarwinHardwareCollector(HardwareCollector):
_fact_class = DarwinHardware
_platform = 'Darwin'
| tsdmgz/ansible | lib/ansible/module_utils/facts/hardware/darwin.py | Python | gpl-3.0 | 3,527 |
#uses the blastn outfmt=1 output and organizes it in
#contig_name blast_result score evalue
import argparse
parser = argparse.ArgumentParser(description="blast output organization")
parser.add_argument("-in",dest="outputdoblast",required=True,help="Provide the blast output")
parser.add_argument("-out",dest="outputname",required=True,help="Provide the destination flie")
arg = parser.parse_args()
blast= open (arg.outputdoblast)
listanomes= open (arg.outputname , "w")
arraynomes=[]
arrayblast=[]
arrayscore=[]
arrayevalue=[]
arrayetc=[]
segundaparte=[]
for t in blast:
if (t.startswith("Query=")==True):
u=0
s= t.strip('Query= ' "\n")
l= s + "\t"
arraynomes.append(l)
if (t.startswith("emb")==True or t.startswith("gb")==True and u==0):
if (u==0):
e=t.rstrip(" \n")
g= e.rsplit(" ",1)
print g
f=g[0].rsplit(" ",1)
print f
nome= f[0]+ "\t"
arrayblast.append(nome)
score=f[1]+ "\t"
arrayscore.append(score)
arrayevalue.append(g[1])
u+=1
if (t.startswith("*")==True):
arrayblast.append("na\t")
arrayscore.append("na\t")
arrayevalue.append("na")
print arraynomes
s=0
while (s<len (arrayblast)):
listanomes.write(arraynomes[s])
listanomes.write(arrayblast[s])
listanomes.write(arrayscore[s])
listanomes.write(arrayevalue[s])
listanomes.write("\n")
s+=1
blast.close()
listanomes.close()
| Nymeria8/SpeciesSplit | blastaaa1.py | Python | gpl-2.0 | 1,545 |
# -*- coding: utf-8 -*-
#
# pyvips documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 9 15:19:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
]
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyvips'
copyright = u'2017, John Cupitt'
author = u'John Cupitt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2.0'
# The full version, including alpha/beta/rc tags.
release = u'2.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['global.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
html_context = {
# Enable the "Edit on GitHub" link within the header of each page.
'display_github': True,
# Set the following variables to generate the resulting github URL for
# each page.
'github_user': 'jcupitt',
'github_repo': 'pyvips',
'github_version': 'master/doc/'
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyvipsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyvips.tex', u'pyvips Documentation',
u'john', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyvips', u'pyvips Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyvips', u'pyvips Documentation',
author, 'pyvips', 'One line description of project.',
'Miscellaneous'),
]
# see https://stackoverflow.com/questions/20569011
# adds autoautosummary directive, see vimage.rst
# try to exclude deprecated
def skip_deprecated(app, what, name, obj, skip, options):
if hasattr(obj, "func_dict") and "__deprecated__" in obj.func_dict:
print("skipping " + name)
return True
return skip or False
def setup(app):
app.connect('autodoc-skip-member', skip_deprecated)
try:
from sphinx.ext.autosummary import Autosummary
from sphinx.ext.autosummary import get_documenter
from docutils.parsers.rst import directives
from sphinx.util.inspect import safe_getattr
class AutoAutoSummary(Autosummary):
option_spec = {
'methods': directives.unchanged,
'attributes': directives.unchanged
}
required_arguments = 1
@staticmethod
def get_members(obj, typ, include_public=None):
if not include_public:
include_public = []
items = []
for name in dir(obj):
try:
documenter = get_documenter(safe_getattr(obj, name),
obj)
except AttributeError:
continue
if documenter.objtype == typ:
items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
return public, items
def run(self):
clazz = str(self.arguments[0])
try:
(module_name, class_name) = clazz.rsplit('.', 1)
m = __import__(module_name, globals(), locals(),
[class_name])
c = getattr(m, class_name)
if 'methods' in self.options:
_, methods = self.get_members(c,
'method', ['__init__'])
self.content = ["~%s.%s" % (clazz, method)
for method in methods
if not method.startswith('_')]
if 'attributes' in self.options:
_, attribs = self.get_members(c, 'attribute')
self.content = ["~%s.%s" % (clazz, attrib)
for attrib in attribs
if not attrib.startswith('_')]
finally:
return super(AutoAutoSummary, self).run()
app.add_directive('autoautosummary', AutoAutoSummary)
except BaseException as e:
raise e
| kleisauke/pyvips | doc/conf.py | Python | mit | 8,396 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import sys
import time
import requests
from bs4 import BeautifulSoup as bs
from sasila.system_normal.downloader.web_driver_pool import get_web_driver_pool
from sasila.system_normal.utils.cookie import formart_selenium_cookies
from sasila.system_normal.utils import logger
from sasila.system_normal.utils import jd_code
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf-8')
def abstract(text, start, end):
if text is None or text == '':
return ''
res = ''
if start is not None and start != '':
if start not in text:
return res
else:
text = text[text.index(start) + len(start):]
if end is not None and end != '':
if end not in text:
return res
else:
res = text[0:text.index(end)]
else:
res = text
return res
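# Illustrative behaviour of abstract() above (hypothetical strings):
#   abstract('"ticket" : "abc123", ...', '"ticket" : "', '"')  -> 'abc123'
#   abstract('no markers here', '"ticket" : "', '"')           -> ''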
class JdMessage(object):
def __init__(self):
self.code = ""
self.code_description = ""
self.cookies = ""
self.qr_captcha = ""
class JdRequest(object):
def __init__(self):
self.web_driver_pool = None # type: Queue
def init_pool(self):
logger.info('init web driver pool...')
self.web_driver_pool = get_web_driver_pool(1)
logger.info('init web driver pool success...')
def login(self, account, password):
message = JdMessage()
web = self.web_driver_pool.get() # type: webdriver.PhantomJS
web.delete_all_cookies()
web.get("https://passport.jd.com/new/login.aspx?ReturnUrl=http%3A%2F%2Fhome.jd.com%2F")
element = web.find_element_by_css_selector("div.login-tab.login-tab-r").find_element_by_css_selector("a")
element.click()
element = web.find_element_by_id("loginname")
element.clear()
element.send_keys(account)
element = web.find_element_by_id("nloginpwd")
element.clear()
element.send_keys(password)
element = web.find_element_by_css_selector("a#loginsubmit")
element.click()
time.sleep(3)
if '我的京东' in bs(web.execute_script("return document.documentElement.outerHTML"), 'lxml').title.string:
message.code = jd_code.SUCCESS
message.code_description = "登录成功"
message.cookies = formart_selenium_cookies(web.get_cookies())
else:
            # Cases needing an SMS verification code, etc., are not handled here
pass
self.web_driver_pool.put(web)
return message
def qr_login(self):
message = JdMessage()
headers = dict()
headers[
"User-Agent"] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
headers["Accept"] = "*/*"
headers["Accept-Encoding"] = "gzip, deflate"
headers["Accept-Language"] = "zh-CN,en,*"
headers["Referer"] = "https://passport.jd.com/new/login.aspx?ReturnUrl=http%3A%2F%2Fhome.jd.com%2F"
session = requests.Session()
response = session.get("https://qr.m.jd.com/show?appid=133&size=147&t=" + str(time.time()))
message.code = jd_code.SUCCESS
message.qr_captcha = response.content.encode("base64")
message.cookies = json.dumps(session.cookies.get_dict()).decode("unicode-escape")
return message
def submit_qrlogin(self, cookies):
message = JdMessage()
headers = dict()
headers[
"User-Agent"] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36"
headers["Accept"] = "*/*"
headers["Accept-Encoding"] = "gzip, deflate"
headers["Accept-Language"] = "zh-CN,en,*"
headers["Referer"] = "https://passport.jd.com/new/login.aspx?ReturnUrl=http%3A%2F%2Fhome.jd.com%2F"
session = requests.Session()
response = session.get("https://qr.m.jd.com/check?callback=jQuery6172296&appid=133&_=1486609849337",
cookies=json.loads(cookies),
headers=headers)
ticket = abstract(response.content, '\"ticket\" : \"', '\"')
headers['X-Requested-With'] = 'XMLHttpRequest'
response = session.get("https://passport.jd.com/uc/qrCodeTicketValidation?t=" + ticket, headers=headers)
message.code = jd_code.SUCCESS
message.code_description = "登录成功"
message.cookies = json.dumps(session.cookies.get_dict()).decode("unicode-escape")
return message
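# Illustrative flow sketch for the QR-code login above (not part of the
# original module):
#
#   request = JdRequest()
#   msg = request.qr_login()             # msg.qr_captcha: base64 image to show
#   # ... user scans the QR code with the JD mobile app ...
#   result = request.submit_qrlogin(msg.cookies)
#   # result.cookies now holds the authenticated session cookies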
| DarkSand/Sasila | sasila/system_instant/crawler/jd/request.py | Python | apache-2.0 | 4,578 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron.plugins.ml2.drivers.linuxbridge.mech_driver \
import mech_linuxbridge
from neutron.tests.unit.plugins.ml2 import _test_mech_agent as base
class LinuxbridgeMechanismBaseTestCase(base.AgentMechanismBaseTestCase):
VIF_TYPE = portbindings.VIF_TYPE_BRIDGE
CAP_PORT_FILTER = True
AGENT_TYPE = constants.AGENT_TYPE_LINUXBRIDGE
GOOD_MAPPINGS = {'fake_physical_network': 'fake_interface'}
GOOD_TUNNEL_TYPES = ['gre', 'vxlan']
GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS,
'tunnel_types': GOOD_TUNNEL_TYPES}
BAD_MAPPINGS = {'wrong_physical_network': 'wrong_interface'}
BAD_TUNNEL_TYPES = ['bad_tunnel_type']
BAD_CONFIGS = {'interface_mappings': BAD_MAPPINGS,
'tunnel_types': BAD_TUNNEL_TYPES}
AGENTS = [{'alive': True,
'configurations': GOOD_CONFIGS,
'host': 'host'}]
AGENTS_DEAD = [{'alive': False,
'configurations': GOOD_CONFIGS,
'host': 'dead_host'}]
AGENTS_BAD = [{'alive': False,
'configurations': GOOD_CONFIGS,
'host': 'bad_host_1'},
{'alive': True,
'configurations': BAD_CONFIGS,
'host': 'bad_host_2'}]
def setUp(self):
super(LinuxbridgeMechanismBaseTestCase, self).setUp()
self.driver = mech_linuxbridge.LinuxbridgeMechanismDriver()
self.driver.initialize()
class LinuxbridgeMechanismGenericTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismGenericTestCase):
pass
class LinuxbridgeMechanismLocalTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismLocalTestCase):
pass
class LinuxbridgeMechanismFlatTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismFlatTestCase):
pass
class LinuxbridgeMechanismVlanTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismVlanTestCase):
pass
class LinuxbridgeMechanismGreTestCase(LinuxbridgeMechanismBaseTestCase,
base.AgentMechanismGreTestCase):
pass
| noironetworks/neutron | neutron/tests/unit/plugins/ml2/drivers/linuxbridge/mech_driver/test_mech_linuxbridge.py | Python | apache-2.0 | 2,987 |
def handler(db_conn, event):
return 'hello'
| Vishakha1990/Lambdas | testing/lambdas/hello.py | Python | apache-2.0 | 48 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import atexit
import argparse
import getpass
import sys
import textwrap
import time
from pyVim import connect
from pyVmomi import vim
import requests
requests.packages.urllib3.disable_warnings()
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
def get_args():
parser = argparse.ArgumentParser()
# because -h is reserved for 'help' we use -s for service
parser.add_argument('-s', '--host',
required=True,
action='store',
help='vSphere service to connect to')
# because we want -p for password, we use -o for port
parser.add_argument('-o', '--port',
type=int,
default=443,
action='store',
help='Port to connect on')
parser.add_argument('-u', '--user',
required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=False,
action='store',
help='Password to use when connecting to host')
parser.add_argument('-e', '--esxi',
required=True,
action='store',
help='Name of the ESXi to look for.')
args = parser.parse_args()
if not args.password:
args.password = getpass.getpass(
prompt='Enter password for host %s and user %s: ' %
(args.host, args.user))
return args
def _create_char_spinner():
"""Creates a generator yielding a char based spinner.
"""
while True:
for c in '|/-\\':
yield c
_spinner = _create_char_spinner()
def spinner(label=''):
"""Prints label with a spinner.
When called repeatedly from inside a loop this prints
a one line CLI spinner.
"""
sys.stdout.write("\r\t%s %s" % (label, _spinner.next()))
sys.stdout.flush()
def get_obj(content, vim_type, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vim_type, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
# form a connection...
args = get_args()
si = connect.SmartConnect(host=args.host, user=args.user, pwd=args.password,
port=args.port)
# doing this means you don't need to remember to disconnect your script/objects
atexit.register(connect.Disconnect, si)
content = si.RetrieveContent()
esxi = get_obj(content,[vim.HostSystem], args.esxi)
vms = esxi.vm
for vm in vms:
print vm.name
sys.exit(0)
| kenelite/vmapi | 050getvmlist.py | Python | mit | 3,072 |
GREEN = '\033[92m'
BLUE = '\033[94m'
PINK = '\033[95m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
 | valferon/stageaobd2 | utils.py | Python | gpl-2.0 | 109
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lecture10.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| CSUChico-CINS465/CINS465-Fall2016-Lecture-Examples | lecture10/lecture10/manage.py | Python | mit | 807 |
#pragma repy
def foo(ip,port,sockobj, ch,mainch):
stopcomm(mainch)
stopcomm(ch)
if callfunc == 'initialize':
waitforconn('127.0.0.1',<connport>,foo)
sleep(.1)
openconn('127.0.0.1',<connport>)
| sburnett/seattle | repy/tests/ut_repytests_teststopcomm2.py | Python | mit | 204 |
#!/usr/bin/python
'''
(C) 2010-2012 ICM UW. All rights reserved.
'''
import string;
import sys;
import re;
import collections;
import pickle
f_key = open(sys.argv[1],'r')
a_key = pickle.load(f_key)
f_key.close()
a_val = sys.argv[2].split('_')
in_path=sys.argv[3]
name_from = sys.argv[4];
name_to = sys.argv[5];
out_path=string.replace(sys.argv[3],name_from,name_to)
f_in = open(in_path,'r')
f_out = open(out_path,'w')
for line in f_in:
written=0
for i in range(0,len(a_key)):
key=a_key[i]
if key in line:
newkey=string.replace(key,'@','').split(' ')[0]
ret = newkey+'='+a_val[i]+'\n'
f_out.write(ret)
written=1
break
if written == 0:
f_out.write(line)
f_in.close()
f_out.close()
print out_path
| CeON/CoAnSys | document-classification/document-classification-logic/src/main/python/opts_chooser.py | Python | agpl-3.0 | 727 |
{
#LDFLAGS='-lm'
'repo_type' : 'svn',
'url' : 'svn://svn.code.sf.net/p/xavs/code/trunk',
'folder_name' : 'xavs_svn',
'configure_options' : '{autoconf_prefix_options} --cross-prefix={cross_prefix_bare}',
'run_post_install' : [
'rm -f NUL', # uh???
],
'packages' : {
'arch' : [ 'yasm' ],
},
'_info' : { 'version' : 'svn (master)', 'fancy_name' : 'xavs' },
}
 | DeadSix27/python_cross_compile_script | packages/dependencies/xavs.py | Python | mpl-2.0 | 369
import action
import entityeffect
class UnequipAction(action.Action):
"""
An Item with this component can be unequipped.
If an item is already in that
equipment slot that item will be unequipped first.
"""
def __init__(self):
super(UnequipAction, self).__init__()
self.component_type = "unequip_action"
self.name = "Remove"
self.display_order = 90
def act(self, **kwargs):
"""
Will attempt to unequip item to the given equipment_slot.
"""
target_entity = kwargs[action.TARGET_ENTITY]
source_entity = kwargs[action.SOURCE_ENTITY]
equipment_slot = kwargs[action.EQUIPMENT_SLOT]
if(target_entity.equipment.slot_is_equiped(equipment_slot)):
self._unequip(target_entity, equipment_slot)
self.add_energy_spent_to_entity(source_entity)
def can_act(self, **kwargs):
"""
Returns true if it's legal for the entity to unequip item.
"""
source_entity = kwargs[action.SOURCE_ENTITY]
equipment_slot = kwargs[action.EQUIPMENT_SLOT]
return (source_entity.inventory.has_room_for_item() and
source_entity.equipment.slot_is_equiped(equipment_slot))
def _unequip(self, target_entity, equipment_slot):
"""
Performs the actual unequip.
"""
if(not target_entity.equipment.slot_is_equiped(equipment_slot)):
return
unequip_effect = entityeffect.Unequip(target_entity, equipment_slot)
target_entity.effect_queue.add(unequip_effect)
| co/TheLastRogue | equipactions.py | Python | bsd-2-clause | 1,584 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.compute import keypairs
get_keypair = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'keypair': {
'type': 'object',
'properties': {
'public_key': {'type': 'string'},
'name': {'type': 'string'},
'fingerprint': {'type': 'string'}
},
'required': ['public_key', 'name', 'fingerprint']
}
},
'required': ['keypair']
}
}
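# Hypothetical response body that satisfies the get_keypair schema above:
#   {"keypair": {"name": "kp-demo",
#                "public_key": "ssh-rsa AAAAB3... Generated-by-Nova",
#                "fingerprint": "3e:ae:c1:...:5d"}}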
create_keypair = {
'status_code': [201],
'response_body': keypairs.create_keypair
}
delete_keypair = {
'status_code': [204],
}
| vedujoshi/os_tempest | tempest/api_schema/compute/v3/keypairs.py | Python | apache-2.0 | 1,333 |
from ..internal.Addon import Addon
class LinkFilter(Addon):
__name__ = "LinkFilter"
__type__ = "hook"
__version__ = "0.14"
__status__ = "testing"
__config__ = [("activated", "bool", "Activated", False),
("filter", "str", "Filter links containing (seperate by comma)", "ul.to,share-online.biz")]
__description__ = "Filters all added links"
__license__ = "GPLv3"
__authors__ = [("segelkma", None)]
def activate(self):
self.manager.addEvent('linksAdded', self.filter_links)
def deactivate(self):
self.manager.removeEvent('linksAdded', self.filter_links)
def filter_links(self, links, pid):
filter_entries = self.config.get('filter').split(',')
for filter in filter_entries:
if filter == "":
break
linkcount = len(links)
links[:] = [link for link in links if link.find(filter) == -1]
linkcount -= len(links)
if linkcount > 0:
linkstring = 'links' if linkcount > 1 else 'link'
self.log_info(
"Removed %s %s containing %s" %
(linkcount, linkstring, filter))
| Velociraptor85/pyload | module/plugins/hooks/LinkFilter.py | Python | gpl-3.0 | 1,205 |
"""
This module implements an interface to the critic2 Bader analysis code.
For most Bader analysis purposes, users are referred to
pymatgen.command_line.bader_caller instead, this module is for advanced
usage requiring identification of critical points in the charge density.
This module depends on a compiled critic2 executable available in the path.
Please follow the instructions at https://github.com/aoterodelaroza/critic2
to compile.
New users are *strongly* encouraged to read the critic2 manual first.
In brief,
* critic2 searches for critical points in charge density
* a critical point can be one of four types: nucleus, bond, ring
or cage
* it does this by seeding locations for likely critical points
and then searching in these regions
* there are two lists of critical points in the output, a list
of non-equivalent points (with in-depth information about the
field at those points), and a full list of points generated
by the appropriate symmetry operations
* connectivity between these points is also provided when
appropriate (e.g. the two nucleus critical points linked to
a bond critical point)
* critic2 can do many other things besides
If you use this module, please cite the following:
A. Otero-de-la-Roza, E. R. Johnson and V. Luaña,
Comput. Phys. Commun. 185, 1007-1018 (2014)
(https://doi.org/10.1016/j.cpc.2013.10.026)
A. Otero-de-la-Roza, M. A. Blanco, A. Martín Pendás and
V. Luaña, Comput. Phys. Commun. 180, 157–166 (2009)
(https://doi.org/10.1016/j.cpc.2008.07.018)
"""
import logging
import os
import subprocess
import warnings
from enum import Enum
import numpy as np
from monty.dev import requires
from monty.json import MSONable
from monty.os.path import which
from monty.serialization import loadfn
from monty.tempfile import ScratchDir
from scipy.spatial import KDTree
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.command_line.bader_caller import get_filepath
from pymatgen.core.periodic_table import DummySpecies
from pymatgen.io.vasp.inputs import Potcar
from pymatgen.io.vasp.outputs import Chgcar, VolumetricData
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Critic2Caller:
"""
Class to call critic2 and store standard output for further processing.
"""
@requires(
which("critic2"),
"Critic2Caller requires the executable critic to be in the path. "
"Please follow the instructions at https://github.com/aoterodelaroza/critic2.",
)
def __init__(self, input_script):
"""
Run Critic2 on a given input script
:param input_script: string defining the critic2 input
"""
# store if examining the input script is useful,
# not otherwise used
self._input_script = input_script
with open("input_script.cri", "w") as f:
f.write(input_script)
args = ["critic2", "input_script.cri"]
with subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True) as rs:
stdout, stderr = rs.communicate()
stdout = stdout.decode()
if stderr:
stderr = stderr.decode()
warnings.warn(stderr)
if rs.returncode != 0:
raise RuntimeError(f"critic2 exited with return code {rs.returncode}: {stdout}")
self._stdout = stdout
self._stderr = stderr
if os.path.exists("cpreport.json"):
cpreport = loadfn("cpreport.json")
else:
cpreport = None
self._cpreport = cpreport
if os.path.exists("yt.json"):
yt = loadfn("yt.json")
else:
yt = None
self._yt = yt
@classmethod
def from_chgcar(
cls,
structure,
chgcar=None,
chgcar_ref=None,
user_input_settings=None,
write_cml=False,
write_json=True,
zpsp=None,
):
"""
Run Critic2 in automatic mode on a supplied structure, charge
density (chgcar) and reference charge density (chgcar_ref).
The reason for a separate reference field is that in
VASP, the CHGCAR charge density only contains valence
electrons and may be missing substantial charge at
nuclei leading to misleading results. Thus, a reference
field is commonly constructed from the sum of AECCAR0
and AECCAR2 which is the total charge density, but then
the valence charge density is used for the final analysis.
If chgcar_ref is not supplied, chgcar will be used as the
reference field. If chgcar is not supplied, the promolecular
charge density will be used as the reference field -- this can
often still give useful results if only topological information
is wanted.
User settings is a dictionary that can contain:
* GRADEPS, float (field units), gradient norm threshold
* CPEPS, float (Bohr units in crystals), minimum distance between
critical points for them to be equivalent
* NUCEPS, same as CPEPS but specifically for nucleus critical
points (critic2 default is depedent on grid dimensions)
* NUCEPSH, same as NUCEPS but specifically for hydrogen nuclei
since associated charge density can be significantly displaced
from hydrogen nucleus
* EPSDEGEN, float (field units), discard critical point if any
element of the diagonal of the Hessian is below this value,
useful for discarding points in vacuum regions
* DISCARD, float (field units), discard critical points with field
value below this value, useful for discarding points in vacuum
regions
* SEED, list of strings, strategies for seeding points, default
is ['WS 1', 'PAIR 10'] which seeds critical points by
sub-dividing the Wigner-Seitz cell and between every atom pair
closer than 10 Bohr, see critic2 manual for more options
:param structure: Structure to analyze
:param chgcar: Charge density to use for analysis. If None, will
use promolecular density. Should be a Chgcar object or path (string).
:param chgcar_ref: Reference charge density. If None, will use
chgcar as reference. Should be a Chgcar object or path (string).
:param user_input_settings (dict): as explained above
:param write_cml (bool): Useful for debug, if True will write all
critical points to a file 'table.cml' in the working directory
useful for visualization
:param write_json (bool): Whether to write out critical points
and YT json. YT integration will be performed with this setting.
:param zpsp (dict): Dict of element/symbol name to number of electrons
(ZVAL in VASP pseudopotential), with which to properly augment core regions
and calculate charge transfer. Optional.
"""
settings = {"CPEPS": 0.1, "SEED": ["WS", "PAIR DIST 10"]}
if user_input_settings:
settings.update(user_input_settings)
# Load crystal structure
input_script = ["crystal POSCAR"]
# Load data to use as reference field
if chgcar_ref:
input_script += ["load ref.CHGCAR id chg_ref", "reference chg_ref"]
# Load data to use for analysis
if chgcar:
input_script += ["load int.CHGCAR id chg_int", "integrable chg_int"]
if zpsp:
zpsp_str = " zpsp " + " ".join([f"{symbol} {int(zval)}" for symbol, zval in zpsp.items()])
input_script[-2] += zpsp_str
# Command to run automatic analysis
auto = "auto "
for k, v in settings.items():
if isinstance(v, list):
for item in v:
auto += f"{k} {item} "
else:
auto += f"{k} {v} "
input_script += [auto]
if write_cml:
input_script += ["cpreport ../table.cml cell border graph"]
if write_json:
input_script += ["cpreport cpreport.json"]
if write_json and chgcar:
# requires gridded data to work
input_script += ["yt"]
input_script += ["yt JSON yt.json"]
input_script = "\n".join(input_script)
with ScratchDir(".") as temp_dir:
os.chdir(temp_dir)
structure.to(filename="POSCAR")
if chgcar and isinstance(chgcar, VolumetricData):
chgcar.write_file("int.CHGCAR")
elif chgcar:
os.symlink(chgcar, "int.CHGCAR")
if chgcar_ref and isinstance(chgcar_ref, VolumetricData):
chgcar_ref.write_file("ref.CHGCAR")
elif chgcar_ref:
os.symlink(chgcar_ref, "ref.CHGCAR")
caller = cls(input_script)
caller.output = Critic2Analysis(
structure,
stdout=caller._stdout,
stderr=caller._stderr,
cpreport=caller._cpreport,
yt=caller._yt,
zpsp=zpsp,
)
return caller
@classmethod
def from_path(cls, path, suffix="", zpsp=None):
"""
Convenience method to run critic2 analysis on a folder containing
typical VASP output files.
This method will:
1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped
counterparts.
2. If AECCAR* files are present, constructs a temporary reference
file as AECCAR0 + AECCAR2.
3. Runs critic2 analysis twice: once for charge, and a second time
for the charge difference (magnetization density).
:param path: path to folder to search in
:param suffix: specific suffix to look for (e.g. '.relax1' for
'CHGCAR.relax1.gz')
:param zpsp: manually specify ZPSP if POTCAR not present
:return:
"""
chgcar_path = get_filepath("CHGCAR", "Could not find CHGCAR!", path, suffix)
chgcar = Chgcar.from_file(chgcar_path)
chgcar_ref = None
if not zpsp:
potcar_path = get_filepath(
"POTCAR",
"Could not find POTCAR, will not be able to calculate charge transfer.",
path,
suffix,
)
if potcar_path:
potcar = Potcar.from_file(potcar_path)
zpsp = {p.element: p.zval for p in potcar}
if not zpsp:
# try and get reference "all-electron-like" charge density if zpsp not present
aeccar0_path = get_filepath(
"AECCAR0",
"Could not find AECCAR0, interpret Bader results with caution.",
path,
suffix,
)
aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None
aeccar2_path = get_filepath(
"AECCAR2",
"Could not find AECCAR2, interpret Bader results with caution.",
path,
suffix,
)
aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None
chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None
return cls.from_chgcar(chgcar.structure, chgcar, chgcar_ref, zpsp=zpsp)
class CriticalPointType(Enum):
"""
Enum type for the different varieties of critical point.
"""
nucleus = "nucleus" # (3, -3)
bond = "bond" # (3, -1)
ring = "ring" # (3, 1)
cage = "cage" # (3, 3)
nnattr = "nnattr" # (3, -3), non-nuclear attractor
class CriticalPoint(MSONable):
"""
Access information about a critical point and the field values at that point.
"""
def __init__(
self,
index,
type,
frac_coords,
point_group,
multiplicity,
field,
field_gradient,
coords=None,
field_hessian=None,
):
"""
Class to characterise a critical point from a topological
analysis of electron charge density.
Note this class is usually associated with a Structure, so
has information on multiplicity/point group symmetry.
:param index: index of point
:param type: type of point, given as a string
:param coords: Cartesian co-ordinates in Angstroms
:param frac_coords: fractional co-ordinates
:param point_group: point group associated with critical point
:param multiplicity: number of equivalent critical points
:param field: value of field at point (f)
:param field_gradient: gradient of field at point (grad f)
:param field_hessian: hessian of field at point (del^2 f)
"""
self.index = index
self._type = type
self.coords = coords
self.frac_coords = frac_coords
self.point_group = point_group
self.multiplicity = multiplicity
self.field = field
self.field_gradient = field_gradient
self.field_hessian = field_hessian
@property
def type(self):
"""
Returns: Instance of CriticalPointType
"""
return CriticalPointType(self._type)
def __str__(self):
return f"Critical Point: {self.type.name} ({self.frac_coords})"
@property
def laplacian(self):
"""
Returns: The Laplacian of the field at the critical point
"""
return np.trace(self.field_hessian)
@property
def ellipticity(self):
"""
Most meaningful for bond critical points,
can be physically interpreted as e.g. degree
of pi-bonding in organic molecules. Consult
literature for more information.
        Returns: The ellipticity of the field at the critical point
"""
eig, _ = np.linalg.eig(self.field_hessian)
eig.sort()
return eig[0] / eig[1] - 1
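    # Worked example with hypothetical eigenvalues: after eig.sort(), a bond
    # critical point with eigenvalues [-2.0, -1.0, 3.0] gives
    # ellipticity = (-2.0) / (-1.0) - 1 = 1.0; a value of 0 corresponds to a
    # cylindrically symmetric bond.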
class Critic2Analysis(MSONable):
"""
Class to process the standard output from critic2 into pymatgen-compatible objects.
"""
def __init__(self, structure, stdout=None, stderr=None, cpreport=None, yt=None, zpsp=None):
"""
This class is used to store results from the Critic2Caller.
To explore the bond graph, use the "structure_graph"
method, which returns a user-friendly StructureGraph
class with bonding information. By default, this returns
a StructureGraph with edge weights as bond lengths, but
can optionally return a graph with edge weights as any
property supported by the `CriticalPoint` class, such as
bond ellipticity.
This class also provides an interface to explore just the
non-symmetrically-equivalent critical points via the
`critical_points` attribute, and also all critical
points (via nodes dict) and connections between them
(via edges dict). The user should be familiar with critic2
before trying to understand these.
Indexes of nucleus critical points in the nodes dict are the
same as the corresponding sites in structure, with indices of
other critical points arbitrarily assigned.
Only one of (stdout, cpreport) required, with cpreport preferred
since this is a new, native JSON output from critic2.
:param structure: associated Structure
:param stdout: stdout from running critic2 in automatic
mode
:param stderr: stderr from running critic2 in automatic
mode
:param cpreport: json output from CPREPORT command
:param yt: json output from YT command
:param zpsp (dict): Dict of element/symbol name to number of electrons
(ZVAL in VASP pseudopotential), with which to calculate charge transfer.
Optional.
"""
self.structure = structure
self._stdout = stdout
self._stderr = stderr
self._cpreport = cpreport
self._yt = yt
self._zpsp = zpsp
self.nodes = {}
self.edges = {}
if yt:
self.structure = self._annotate_structure_with_yt(yt, structure, zpsp)
if cpreport:
self._parse_cpreport(cpreport)
elif stdout:
self._parse_stdout(stdout)
else:
raise ValueError("One of cpreport or stdout required.")
self._remap_indices()
def structure_graph(self, include_critical_points=("bond", "ring", "cage")):
"""
A StructureGraph object describing bonding information
in the crystal.
Args:
include_critical_points: add DummySpecies for
the critical points themselves, a list of
"nucleus", "bond", "ring", "cage", set to None
to disable
Returns: a StructureGraph
"""
structure = self.structure.copy()
point_idx_to_struct_idx = {}
if include_critical_points:
# atoms themselves don't have field information
# so set to 0
for prop in ("ellipticity", "laplacian", "field"):
structure.add_site_property(prop, [0] * len(structure))
for idx, node in self.nodes.items():
cp = self.critical_points[node["unique_idx"]]
if cp.type.value in include_critical_points:
specie = DummySpecies(f"X{cp.type.value[0]}cp", oxidation_state=None)
structure.append(
specie,
node["frac_coords"],
properties={
"ellipticity": cp.ellipticity,
"laplacian": cp.laplacian,
"field": cp.field,
},
)
point_idx_to_struct_idx[idx] = len(structure) - 1
edge_weight = "bond_length"
edge_weight_units = "Å"
sg = StructureGraph.with_empty_graph(
structure,
name="bonds",
edge_weight_name=edge_weight,
edge_weight_units=edge_weight_units,
)
edges = self.edges.copy()
idx_to_delete = []
# check for duplicate bonds
for idx, edge in edges.items():
unique_idx = self.nodes[idx]["unique_idx"]
# only check edges representing bonds, not rings
if self.critical_points[unique_idx].type == CriticalPointType.bond:
if idx not in idx_to_delete:
for idx2, edge2 in edges.items():
if idx != idx2 and edge == edge2:
idx_to_delete.append(idx2)
warnings.warn(
"Duplicate edge detected, try re-running "
"critic2 with custom parameters to fix this. "
"Mostly harmless unless user is also "
"interested in rings/cages."
)
logger.debug(
"Duplicate edge between points {} (unique point {})"
"and {} ({}).".format(
idx,
self.nodes[idx]["unique_idx"],
idx2,
self.nodes[idx2]["unique_idx"],
)
)
# and remove any duplicate bonds present
for idx in idx_to_delete:
del edges[idx]
for idx, edge in edges.items():
unique_idx = self.nodes[idx]["unique_idx"]
# only add edges representing bonds, not rings
if self.critical_points[unique_idx].type == CriticalPointType.bond:
from_idx = edge["from_idx"]
to_idx = edge["to_idx"]
# have to also check bond is between nuclei if non-nuclear
# attractors not in structure
skip_bond = False
if include_critical_points and "nnattr" not in include_critical_points:
from_type = self.critical_points[self.nodes[from_idx]["unique_idx"]].type
to_type = self.critical_points[self.nodes[from_idx]["unique_idx"]].type
skip_bond = (from_type != CriticalPointType.nucleus) or (to_type != CriticalPointType.nucleus)
if not skip_bond:
from_lvec = edge["from_lvec"]
to_lvec = edge["to_lvec"]
relative_lvec = np.subtract(to_lvec, from_lvec)
# for edge case of including nnattrs in bonding graph when other critical
# points also included, indices may get mixed
struct_from_idx = point_idx_to_struct_idx.get(from_idx, from_idx)
struct_to_idx = point_idx_to_struct_idx.get(to_idx, to_idx)
weight = self.structure.get_distance(struct_from_idx, struct_to_idx, jimage=relative_lvec)
crit_point = self.critical_points[unique_idx]
edge_properties = {
"field": crit_point.field,
"laplacian": crit_point.laplacian,
"ellipticity": crit_point.ellipticity,
"frac_coords": self.nodes[idx]["frac_coords"],
}
sg.add_edge(
struct_from_idx,
struct_to_idx,
from_jimage=from_lvec,
to_jimage=to_lvec,
weight=weight,
edge_properties=edge_properties,
)
return sg
def get_critical_point_for_site(self, n):
"""
Args:
n: Site index n
Returns: A CriticalPoint instance
"""
return self.critical_points[self.nodes[n]["unique_idx"]]
def get_volume_and_charge_for_site(self, n):
"""
Args:
n: Site index n
Returns: A dict containing "volume" and "charge" keys,
or None if YT integration not performed
"""
# pylint: disable=E1101
if not self._node_values:
return None
return self._node_values[n]
def _parse_cpreport(self, cpreport):
def get_type(signature: int, is_nucleus: bool):
if signature == 3:
return "cage"
if signature == 1:
return "ring"
if signature == -1:
return "bond"
if signature == -3:
if is_nucleus:
return "nucleus"
return "nnattr"
return None
bohr_to_angstrom = 0.529177
self.critical_points = [
CriticalPoint(
p["id"] - 1,
get_type(p["signature"], p["is_nucleus"]),
p["fractional_coordinates"],
p["point_group"],
p["multiplicity"],
p["field"],
p["gradient"],
coords=[x * bohr_to_angstrom for x in p["cartesian_coordinates"]]
if cpreport["units"] == "bohr"
else None,
field_hessian=p["hessian"],
)
for p in cpreport["critical_points"]["nonequivalent_cps"]
]
for idx, p in enumerate(cpreport["critical_points"]["cell_cps"]):
self._add_node(
idx=p["id"] - 1,
unique_idx=p["nonequivalent_id"] - 1,
frac_coords=p["fractional_coordinates"],
)
if "attractors" in p:
self._add_edge(
idx=p["id"] - 1,
from_idx=int(p["attractors"][0]["cell_id"]) - 1,
from_lvec=p["attractors"][0]["lvec"],
to_idx=int(p["attractors"][1]["cell_id"]) - 1,
to_lvec=p["attractors"][1]["lvec"],
)
def _remap_indices(self):
"""
Re-maps indices on self.nodes and self.edges such that node indices match
that of structure, and then sorts self.nodes by index.
"""
# Order of nuclei provided by critic2 doesn't
# necessarily match order of sites in Structure.
# This is because critic2 performs a symmetrization step.
# We perform a mapping from one to the other,
# and re-index all nodes accordingly.
node_mapping = {} # critic2_index:structure_index
# ensure frac coords are in [0,1] range
frac_coords = np.array(self.structure.frac_coords) % 1
kd = KDTree(frac_coords)
node_mapping = {}
for idx, node in self.nodes.items():
if self.critical_points[node["unique_idx"]].type == CriticalPointType.nucleus:
node_mapping[idx] = kd.query(node["frac_coords"])[1]
if len(node_mapping) != len(self.structure):
warnings.warn(
"Check that all sites in input structure ({}) have "
"been detected by critic2 ({}).".format(len(self.structure), len(node_mapping))
)
self.nodes = {node_mapping.get(idx, idx): node for idx, node in self.nodes.items()}
for edge in self.edges.values():
edge["from_idx"] = node_mapping.get(edge["from_idx"], edge["from_idx"])
edge["to_idx"] = node_mapping.get(edge["to_idx"], edge["to_idx"])
@staticmethod
def _annotate_structure_with_yt(yt, structure, zpsp):
volume_idx = None
charge_idx = None
for prop in yt["integration"]["properties"]:
if prop["label"] == "Volume":
volume_idx = prop["id"] - 1 # 1-indexed, change to 0
elif prop["label"] == "$chg_int":
charge_idx = prop["id"] - 1
def get_volume_and_charge(nonequiv_idx):
attractor = yt["integration"]["attractors"][nonequiv_idx - 1]
if attractor["id"] != nonequiv_idx:
raise ValueError(f"List of attractors may be un-ordered (wanted id={nonequiv_idx}): {attractor}")
return (
attractor["integrals"][volume_idx],
attractor["integrals"][charge_idx],
)
volumes = []
charges = []
charge_transfer = []
for idx, site in enumerate(yt["structure"]["cell_atoms"]):
if not np.allclose(structure[idx].frac_coords, site["fractional_coordinates"]):
raise IndexError(
"Site in structure doesn't seem to match site in YT integration:\n{}\n{}".format(
structure[idx], site
)
)
volume, charge = get_volume_and_charge(site["nonequivalent_id"])
volumes.append(volume)
charges.append(charge)
if zpsp:
if structure[idx].species_string in zpsp:
charge_transfer.append(charge - zpsp[structure[idx].species_string])
else:
raise ValueError(
"ZPSP argument does not seem compatible with species in structure ({}): {}".format(
structure[idx].species_string, zpsp
)
)
structure = structure.copy()
structure.add_site_property("bader_volume", volumes)
structure.add_site_property("bader_charge", charges)
if zpsp:
if len(charge_transfer) != len(charges):
warnings.warn(f"Something went wrong calculating charge transfer: {charge_transfer}")
else:
structure.add_site_property("bader_charge_transfer", charge_transfer)
return structure
def _parse_stdout(self, stdout):
warnings.warn(
"Parsing critic2 standard output is deprecated and will not be maintained, "
"please use the native JSON output in future."
)
stdout = stdout.split("\n")
# NOTE WE ARE USING 0-BASED INDEXING:
# This is different from critic2 which
# uses 1-based indexing, so all parsed
# indices have 1 subtracted.
# Parsing happens in two stages:
# 1. We construct a list of unique critical points
# (i.e. non-equivalent by the symmetry of the crystal)
# and the properties of the field at those points
# 2. We construct a list of nodes and edges describing
# all critical points in the crystal
# Steps 1. and 2. are essentially independent, except
# that the critical points in 2. have a pointer to their
# associated unique critical point in 1. so that more
# information on that point can be retrieved if necessary.
unique_critical_points = []
# parse unique critical points
for i, line in enumerate(stdout):
if "mult name f |grad| lap" in line:
start_i = i + 1
elif "* Analysis of system bonds" in line:
end_i = i - 2
# if start_i and end_i haven't been found, we
# need to re-evaluate assumptions in this parser!
for i, line in enumerate(stdout):
if start_i <= i <= end_i:
l = line.replace("(", "").replace(")", "").split()
unique_idx = int(l[0]) - 1
point_group = l[1]
# type = l[2] # type from definition of critical point e.g. (3, -3)
critical_point_type = l[3] # type from name, e.g. nucleus
frac_coords = [float(l[4]), float(l[5]), float(l[6])]
multiplicity = float(l[7])
# name = float(l[8])
field = float(l[9])
field_gradient = float(l[10])
# laplacian = float(l[11])
point = CriticalPoint(
unique_idx,
critical_point_type,
frac_coords,
point_group,
multiplicity,
field,
field_gradient,
)
unique_critical_points.append(point)
for i, line in enumerate(stdout):
if "+ Critical point no." in line:
unique_idx = int(line.split()[4]) - 1
elif "Hessian:" in line:
l1 = list(map(float, stdout[i + 1].split()))
l2 = list(map(float, stdout[i + 2].split()))
l3 = list(map(float, stdout[i + 3].split()))
hessian = [
[l1[0], l1[1], l1[2]],
[l2[0], l2[1], l2[2]],
[l3[0], l3[1], l3[2]],
]
unique_critical_points[unique_idx].field_hessian = hessian
self.critical_points = unique_critical_points
# parse graph connecting critical points
for i, line in enumerate(stdout):
if "#cp ncp typ position " in line:
start_i = i + 1
elif "* Attractor connectivity matrix" in line:
end_i = i - 2
# if start_i and end_i haven't been found, we
# need to re-evaluate assumptions in this parser!
for i, line in enumerate(stdout):
if start_i <= i <= end_i:
l = line.replace("(", "").replace(")", "").split()
idx = int(l[0]) - 1
unique_idx = int(l[1]) - 1
frac_coords = [float(l[3]), float(l[4]), float(l[5])]
self._add_node(idx, unique_idx, frac_coords)
if len(l) > 6:
from_idx = int(l[6]) - 1
to_idx = int(l[10]) - 1
self._add_edge(
idx,
from_idx=from_idx,
from_lvec=(int(l[7]), int(l[8]), int(l[9])),
to_idx=to_idx,
to_lvec=(int(l[11]), int(l[12]), int(l[13])),
)
def _add_node(self, idx, unique_idx, frac_coords):
"""
Add information about a node describing a critical point.
:param idx: index
:param unique_idx: index of unique CriticalPoint,
used to look up more information of point (field etc.)
:param frac_coord: fractional co-ordinates of point
:return:
"""
self.nodes[idx] = {"unique_idx": unique_idx, "frac_coords": frac_coords}
def _add_edge(self, idx, from_idx, from_lvec, to_idx, to_lvec):
"""
Add information about an edge linking two critical points.
This actually describes two edges:
from_idx ------ idx ------ to_idx
However, in practice, from_idx and to_idx will typically be
atom nuclei, with the center node (idx) referring to a bond
critical point. Thus, it will be more convenient to model
this as a single edge linking nuclei with the properties
of the bond critical point stored as an edge attribute.
:param idx: index of node
:param from_idx: from index of node
:param from_lvec: vector of lattice image the from node is in
as tuple of ints
:param to_idx: to index of node
:param to_lvec: vector of lattice image the to node is in as
tuple of ints
:return:
"""
self.edges[idx] = {
"from_idx": from_idx,
"from_lvec": from_lvec,
"to_idx": to_idx,
"to_lvec": to_lvec,
}
| vorwerkc/pymatgen | pymatgen/command_line/critic2_caller.py | Python | mit | 33,844 |
from docopt import docopt
import sys
#transforms wlp files into plaintext
def main(args):
RAW = 0
LEMMA = 1
POS = 2
in_files = args["<wlp_files>"]
column = LEMMA if args["--lemma"] else RAW
lower = args["--lower"]
for in_file in in_files:
txt = []
try:
with open(in_file, "r") as f:
for line in f:
if not line.startswith("//"):
parts = line.strip().split("\t")
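                        # keep only well-formed 3-column rows; drop "@" tokens,
                        # tokens starting with "@@" or "&", rows whose PoS tag is
                        # "null", and rows where both word and lemma are the
                        # null-byte placeholder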
if len(parts) == 3 and parts[RAW] != "@" and not parts[RAW].startswith("@@") and parts[POS] != "null" and not (parts[RAW] == "\x00" and parts[LEMMA] == "\x00") and not parts[RAW].startswith("&"):
if column == RAW or parts[LEMMA] == "\x00":
it = parts[RAW]
else:
it = parts[column]
if it == "n't":
it = "not"
if not it == "q":
txt.append(it)
txt = " ".join(txt)
if lower:
txt = txt.lower()
print(txt)
        except Exception:
            sys.stderr.write("Error in " + in_file + "\n")
if __name__ == "__main__":
args = docopt("""
Usage:
coha_converter.py [options] <wlp_files>...
Options:
--lemma Output lemmata instead of tokens
--lower Lowercase output
""")
main(args) | hellrich/JeSemE | pipeline/preprocessing/coha_converter.py | Python | mit | 1,534 |
#
# Copyright (c) 2009-2021 Tom Keffer <[email protected]> and
# Matthew Wall
#
# See the file LICENSE.txt for your full rights.
#
"""Utilities for managing the config file"""
from __future__ import print_function
from __future__ import absolute_import
import sys
import configobj
import weecfg
import weewx
from weecfg import Logger
# The default station information:
stn_info_defaults = {
'location' : "My Home Town",
'latitude' : "0.0",
'longitude': "0.0",
'altitude': "0, meter",
'unit_system': 'metricwx',
'register_this_station': 'false',
'station_type': 'Simulator',
'driver': 'weewx.drivers.simulator',
'lang' : 'en',
}
class ConfigEngine(object):
"""Install, upgrade, or reconfigure the configuration file, weewx.conf"""
def __init__(self, logger=None):
self.logger = logger or Logger()
def run(self, args, options):
if options.version:
print(weewx.__version__)
sys.exit(0)
if options.list_drivers:
weecfg.print_drivers()
sys.exit(0)
#
# If we got to this point, the verb must be --install, --upgrade, or --reconfigure.
# Check for errors in the options.
#
# There must be one, and only one, of options install, upgrade, and reconfigure
if sum([options.install is not None,
options.upgrade is not None,
options.reconfigure is not None]) != 1:
sys.exit("Must specify one and only one of --install, --upgrade, or --reconfigure.")
# Check for missing --dist-config
if (options.install or options.upgrade) and not options.dist_config:
sys.exit("The commands --install and --upgrade require option --dist-config.")
# Check for missing config file
if options.upgrade and not (options.config_path or args):
sys.exit("The command --upgrade requires an existing configuration file.")
if options.install and not options.output:
sys.exit("The --install command requires option --output.")
# The install option does not take an old config file
if options.install and (options.config_path or args):
sys.exit("A configuration file cannot be used with the --install command.")
#
# Now run the commands.
#
# First, fiddle with option --altitude to convert it into a list:
if options.altitude:
options.altitude = options.altitude.split(",")
# Option "--unit-system" used to be called "--units". For backwards compatibility, allow
# both.
if options.units:
if options.unit_system:
sys.exit("Specify either option --units or option --unit-system, but not both")
options.unit_system = options.units
delattr(options, "units")
if options.install or options.upgrade:
# These options require a distribution config file.
# Open it up and parse it:
try:
dist_config_dict = configobj.ConfigObj(options.dist_config,
file_error=True,
encoding='utf-8')
except IOError as e:
sys.exit("Unable to open distribution configuration file: %s" % e)
except SyntaxError as e:
sys.exit("Syntax error in distribution configuration file '%s': %s"
% (options.dist_config, e))
# The install command uses the distribution config file as its input.
# Other commands use an existing config file.
if options.install:
config_dict = dist_config_dict
else:
try:
config_path, config_dict = weecfg.read_config(options.config_path, args)
except SyntaxError as e:
sys.exit("Syntax error in configuration file: %s" % e)
except IOError as e:
sys.exit("Unable to open configuration file: %s" % e)
self.logger.log("Using configuration file %s" % config_path)
if options.upgrade:
# Update the config dictionary, then merge it with the distribution
# dictionary
weecfg.update_and_merge(config_dict, dist_config_dict)
elif options.install or options.reconfigure:
# Extract stn_info from the config_dict and command-line options:
stn_info = self.get_stn_info(config_dict, options)
# Use it to modify the configuration file.
weecfg.modify_config(config_dict, stn_info, self.logger, options.debug)
else:
sys.exit("Internal logic error in config.py")
# For the path to the final file, use whatever was specified by --output,
# or the original path if that wasn't specified
output_path = options.output or config_path
# Save weewx.conf, backing up any old file.
backup_path = weecfg.save(config_dict, output_path, not options.no_backup)
if backup_path:
self.logger.log("Saved backup to %s" % backup_path)
def get_stn_info(self, config_dict, options):
"""Build the stn_info structure. Extract first from the config_dict object,
then from any command-line overrides, then use defaults, then prompt the user
for values."""
# Start with values from the config file:
stn_info = weecfg.get_station_info_from_config(config_dict)
# Get command line overrides, and apply them to stn_info. If that leaves a value
# unspecified, then get it from the defaults.
for k in stn_info_defaults:
# Override only if the option exists and is not None:
if hasattr(options, k) and getattr(options, k) is not None:
stn_info[k] = getattr(options, k)
elif k not in stn_info:
# Value is still not specified. Get a default value
stn_info[k] = stn_info_defaults[k]
# Unless --no-prompt has been specified, give the user a chance
# to change things:
if not options.no_prompt:
prompt_info = weecfg.prompt_for_info(**stn_info)
stn_info.update(prompt_info)
driver = weecfg.prompt_for_driver(stn_info.get('driver'))
stn_info['driver'] = driver
stn_info.update(weecfg.prompt_for_driver_settings(driver, config_dict))
return stn_info
| weewx/weewx | bin/weecfg/config.py | Python | gpl-3.0 | 6,561 |
# coding: utf-8
from django.conf.urls import url, patterns
from rest_framework import routers
from parkkeeper import views
from rest_framework.urlpatterns import format_suffix_patterns
router = routers.DefaultRouter()
router.register('host', views.HostViewSet)
router.register('host_group', views.HostGroupViewSet)
router.register('monit_schedule', views.MonitScheduleViewSet)
router.register('work_schedule', views.WorkScheduleViewSet)
urlpatterns = [
url(r'^$', views.index),
url(r'^monit_status_latest/$', views.monit_status_latest),
url(r'^work_status_latest/$', views.work_status_latest),
url(r'^monit_task/(.+)/$', views.monit_task),
url(r'^work_task/(.+)/$', views.work_task),
]
urlpatterns = format_suffix_patterns(urlpatterns)
urlpatterns += router.urls
| telminov/django-park-keeper | parkkeeper/urls.py | Python | mit | 788 |
# -*- coding: utf-8; mode: python; -*-
"""
A package that implements offline messages for Django
Web Framework.
(C) 2011 oDesk www.oDesk.com w/revisions by Zapier.com
"""
from setuptools import setup, find_packages
setup(
name='fool-django-offline-messages',
version='0.3.9.dev0',
description='A package that implements offline messages for Django plus more',
long_description='A package that implements offline messages for Django Web Framework',
license='BSD',
keywords='django offline messages',
url='https://github.com/themotleyfool/django-offline-messages',
    dependency_links=['http://localshop.foolhq.com/packages/'],
author='Brian Faherty',
author_email='[email protected]',
maintainer='Brian Faherty',
maintainer_email='[email protected]',
packages=find_packages(),
include_package_data=True,
    install_requires=['jsonfield'],
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
test_suite='tests.runtests.runtests'
)
| themotleyfool/django-offline-messages | setup.py | Python | bsd-3-clause | 1,409 |
import sys
import os
import logging as log
import numpy as np
from ex.common import *
from ex.io.common import *
import ex.pp.mr as mr
import ex.array as ea
import sdss_info as sinfo
import base
class Mapper(mr.BaseMapper):
'''compact the data. input files should be grouped to reduce the
    size of intermediate reducer results.
'''
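    # Illustrative call (file names are hypothetical): mapper.Map(["a.pkl", "b.pkl"])
    # loads both pickles and writes a single "<output_dest>/a_group.pkl" holding
    # the assembled vector and scalar feature matrices from both inputs.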
def Map(self, input_files):
if not isinstance(input_files, list): input_files=[input_files]
check(self.output_type == 'file', 'unsupported output type')
n_files=len(input_files)
input_file=input_files[0]
output_file="{0}/{1}_group.pkl".format(
self.output_dest, SplitFilename(input_file)[0])
# if the file has already been processed
if os.path.exists(output_file):
log.info('Skipping group {0}'.format(output_file))
return n_files
log.info("Processing {0} files (group of '{1}') -> {2}".format(
n_files, input_file, output_file))
vector_feat=[]
scalar_feat=[]
for input_file in input_files:
fid=SplitFilename(input_file)[0]
hdus=LoadPickles(input_file)['hdus']
header=hdus[0]['header']
sf=hdus[1]['data']
sf=dict(sf, **hdus[1]['spec_data'])
vf={'SPECTRA': hdus[0]['data'],
'CONTINUUM': sf.pop('CONTINUUM'),
'NOISE': sf.pop('NOISE'),
'MASK': sf.pop('MASK')}
vf=dict(vf, **hdus[1]['line_data'])
vector_feat.append(vf)
scalar_feat.append(sf)
data_v={}
for key in vector_feat[0].keys():
data_v[key]=ea.AssembleMatrix(vector_feat, key, True)
data_s={}
for key in scalar_feat[0].keys():
data_s[key]=ea.AssembleVector(scalar_feat, key)
SavePickles(output_file, [{'header': header,
'vec_feat': data_v,
'sca_feat': data_s}])
return n_files
| excelly/xpy-ml | sdss/uw_data/mr_compact_data.py | Python | apache-2.0 | 2,044 |
# -*- coding: utf-8 -*-
from health_mdg6 import *
| kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/health_mdg6/__init__.py | Python | gpl-3.0 | 50 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import unittest
from azure.communication.chat._communication_identifier_serializer import serialize_identifier, deserialize_identifier
from azure.communication.chat._generated.models import(
CommunicationIdentifierModel,
MicrosoftTeamsUserIdentifierModel,
CommunicationUserIdentifierModel,
PhoneNumberIdentifierModel
)
from azure.communication.chat._shared.models import(
CommunicationUserIdentifier,
CommunicationCloudEnvironment,
UnknownIdentifier,
PhoneNumberIdentifier,
MicrosoftTeamsUserIdentifier
)
class CommunicationUserIdentifierSerializerTest(unittest.TestCase):
def setUp(self):
self.testPhoneNumber="+12223334444"
self.testUserModel = CommunicationUserIdentifierModel(id="User Id")
self.testPhoneNumberModel = PhoneNumberIdentifierModel(value=self.testPhoneNumber)
self.testTeamsUserModel = MicrosoftTeamsUserIdentifierModel(user_id="Microsoft Teams User Id",
is_anonymous=True,
cloud=CommunicationCloudEnvironment.PUBLIC)
def test_serialize_communication_user(self):
communication_identifier_model = serialize_identifier(
CommunicationUserIdentifier("an id")
)
        assert communication_identifier_model['communication_user']['id'] == "an id"
def test_deserialize_communication_user(self):
communication_identifier_actual = deserialize_identifier(
CommunicationIdentifierModel(
raw_id="an id",
communication_user=self.testUserModel
)
)
communication_identifier_expected = CommunicationUserIdentifier("an id")
assert isinstance(communication_identifier_actual, CommunicationUserIdentifier)
assert communication_identifier_actual.properties['id'] == communication_identifier_expected.properties['id']
def test_serialize_unknown_identifier(self):
unknown_identifier_model = serialize_identifier(
UnknownIdentifier("an id")
)
        assert unknown_identifier_model['raw_id'] == "an id"
def test_deserialize_unknown_identifier(self):
unknown_identifier_actual = deserialize_identifier(
CommunicationIdentifierModel(
raw_id="an id"
)
)
unknown_identifier_expected = UnknownIdentifier("an id")
assert isinstance(unknown_identifier_actual, UnknownIdentifier)
assert unknown_identifier_actual.raw_id == unknown_identifier_expected.raw_id
def test_serialize_phone_number(self):
phone_number_identifier_model = serialize_identifier(
PhoneNumberIdentifier("phonenumber")
)
        assert phone_number_identifier_model['phone_number']['value'] == "phonenumber"
def test_deserialize_phone_number(self):
phone_number_identifier_actual = deserialize_identifier(
CommunicationIdentifierModel(
raw_id="someid",
phone_number=self.testPhoneNumberModel
)
)
phone_number_identifier_expected = PhoneNumberIdentifier(self.testPhoneNumber, raw_id="someid")
assert isinstance(phone_number_identifier_actual, PhoneNumberIdentifier)
assert phone_number_identifier_actual.properties['value'] == phone_number_identifier_expected.properties['value']
assert phone_number_identifier_actual.raw_id == phone_number_identifier_expected.raw_id
def test_serialize_teams_user(self):
teams_user_identifier_model = serialize_identifier(
MicrosoftTeamsUserIdentifier(
user_id="teamsid",
cloud=CommunicationCloudEnvironment.PUBLIC,
raw_id="someid"
)
)
        assert teams_user_identifier_model['microsoft_teams_user']['user_id'] == "teamsid"
assert teams_user_identifier_model['microsoft_teams_user']['cloud'] is CommunicationCloudEnvironment.PUBLIC
        assert teams_user_identifier_model['raw_id'] == "someid"
def test_deserialize_teams_user(self):
teams_user_identifier_actual = deserialize_identifier(
CommunicationIdentifierModel(
raw_id="someid",
microsoft_teams_user=self.testTeamsUserModel
)
)
teams_user_identifier_expected = MicrosoftTeamsUserIdentifier(
raw_id="someid",
user_id="Microsoft Teams User Id",
cloud=CommunicationCloudEnvironment.PUBLIC,
is_anonymous=True
)
assert isinstance(teams_user_identifier_actual, MicrosoftTeamsUserIdentifier)
assert teams_user_identifier_actual.raw_id == teams_user_identifier_expected.raw_id
assert teams_user_identifier_actual.properties['user_id'] == teams_user_identifier_expected.properties['user_id']
assert teams_user_identifier_actual.properties['is_anonymous'] == teams_user_identifier_expected.properties['is_anonymous']
assert teams_user_identifier_actual.properties['cloud'] == teams_user_identifier_expected.properties['cloud']
def test_serialize_foreign_throws(self):
foreign_obj = "Foreign object"
self.assertRaises(
TypeError,
lambda : serialize_identifier(foreign_obj)
)
if __name__ == "__main__":
unittest.main() | Azure/azure-sdk-for-python | sdk/communication/azure-communication-chat/tests/_shared/test_communication_identifier_serializer.py | Python | mit | 5,721 |
# GENERATED FILE - DO NOT EDIT THIS FILE UNLESS YOU ARE A WIZARD
#pylint: skip-file
from heat.engine import properties
from heat.engine import constraints
from heat.engine import attributes
from heat.common.i18n import _
from avi.heat.avi_resource import AviResource
from avi.heat.avi_resource import AviNestedResource
from options import *
from common import *
from vi_mgr_common import *
from dos import *
from analytics_policy import *
from vip_autoscale import *
class VssPlacement(object):
# all schemas
num_subcores_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.5) Number of sub-cores that comprise a CPU core. (Default: 4)"),
required=False,
update_allowed=True,
)
core_nonaffinity_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.5) Degree of core non-affinity for VS placement. (Default: 2)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'num_subcores',
'core_nonaffinity',
)
# mapping of properties to their schemas
properties_schema = {
'num_subcores': num_subcores_schema,
'core_nonaffinity': core_nonaffinity_schema,
}
class VcenterClusters(object):
# all schemas
cluster_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
cluster_uuids_schema = properties.Schema(
properties.Schema.LIST,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=cluster_uuids_item_schema,
required=False,
update_allowed=True,
)
include_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'cluster_uuids',
'include',
)
# mapping of properties to their schemas
properties_schema = {
'cluster_uuids': cluster_uuids_schema,
'include': include_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'cluster_uuids': 'vimgrclusterruntime',
}
class VcenterHosts(object):
# all schemas
host_uuids_item_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=False,
)
host_uuids_schema = properties.Schema(
properties.Schema.LIST,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
schema=host_uuids_item_schema,
required=False,
update_allowed=True,
)
include_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'host_uuids',
'include',
)
# mapping of properties to their schemas
properties_schema = {
'host_uuids': host_uuids_schema,
'include': include_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'host_uuids': 'vimgrhostruntime',
}
class IptableRule(object):
# all schemas
src_ip_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IpAddrPrefix.properties_schema,
required=False,
update_allowed=True,
)
dst_ip_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IpAddrPrefix.properties_schema,
required=False,
update_allowed=True,
)
src_port_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=PortRange.properties_schema,
required=False,
update_allowed=True,
)
dst_port_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=PortRange.properties_schema,
required=False,
update_allowed=True,
)
proto_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['PROTO_ALL', 'PROTO_ICMP', 'PROTO_TCP', 'PROTO_UDP']),
],
)
input_interface_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
output_interface_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
action_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
constraints=[
constraints.AllowedValues(['ACCEPT', 'DNAT', 'DROP', 'MASQUERADE', 'REJECT']),
],
)
dnat_ip_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IpAddr.properties_schema,
required=False,
update_allowed=True,
)
tag_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'src_ip',
'dst_ip',
'src_port',
'dst_port',
'proto',
'input_interface',
'output_interface',
'action',
'dnat_ip',
'tag',
)
# mapping of properties to their schemas
properties_schema = {
'src_ip': src_ip_schema,
'dst_ip': dst_ip_schema,
'src_port': src_port_schema,
'dst_port': dst_port_schema,
'proto': proto_schema,
'input_interface': input_interface_schema,
'output_interface': output_interface_schema,
'action': action_schema,
'dnat_ip': dnat_ip_schema,
'tag': tag_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'src_ip': getattr(IpAddrPrefix, 'field_references', {}),
'dst_ip': getattr(IpAddrPrefix, 'field_references', {}),
'src_port': getattr(PortRange, 'field_references', {}),
'dst_port': getattr(PortRange, 'field_references', {}),
'dnat_ip': getattr(IpAddr, 'field_references', {}),
}
unique_keys = {
'src_ip': getattr(IpAddrPrefix, 'unique_keys', {}),
'dst_ip': getattr(IpAddrPrefix, 'unique_keys', {}),
'src_port': getattr(PortRange, 'unique_keys', {}),
'dst_port': getattr(PortRange, 'unique_keys', {}),
'dnat_ip': getattr(IpAddr, 'unique_keys', {}),
}
class IptableRuleSet(object):
# all schemas
table_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
chain_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
rules_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=IptableRule.properties_schema,
required=True,
update_allowed=False,
)
rules_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=rules_item_schema,
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'table',
'chain',
'rules',
)
# mapping of properties to their schemas
properties_schema = {
'table': table_schema,
'chain': chain_schema,
'rules': rules_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'rules': getattr(IptableRule, 'field_references', {}),
}
unique_keys = {
'rules': getattr(IptableRule, 'unique_keys', {}),
}
class ServiceEngineGroup(AviResource):
resource_name = "serviceenginegroup"
# all schemas
avi_version_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Version to use for the object. Default is 16.4.2. If you plan to use any fields introduced after 16.4.2, then this needs to be explicitly set."),
required=False,
update_allowed=True,
)
name_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=True,
update_allowed=True,
)
description_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=True,
)
max_vs_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of Virtual Services that can be placed on a single Service Engine. East West Virtual Services are excluded from this limit. (Default: 10)"),
required=False,
update_allowed=True,
)
min_scaleout_per_vs_schema = properties.Schema(
properties.Schema.NUMBER,
_("Minimum number of active Service Engines for the Virtual Service. (Default: 1)"),
required=False,
update_allowed=True,
)
max_scaleout_per_vs_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of active Service Engines for the Virtual Service. (Default: 4)"),
required=False,
update_allowed=True,
)
max_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum number of Services Engines in this group. (Default: 10)"),
required=False,
update_allowed=True,
)
vcpus_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Number of vcpus for each of the Service Engine virtual machines. (Default: 1)"),
required=False,
update_allowed=True,
)
memory_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Amount of memory for each of the Service Engine virtual machines. (Default: 2048)"),
required=False,
update_allowed=True,
)
disk_per_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Amount of disk space for each of the Service Engine virtual machines. (Units: GB) (Default: 10)"),
required=False,
update_allowed=True,
)
max_cpu_usage_schema = properties.Schema(
properties.Schema.NUMBER,
_("When CPU usage on an SE exceeds this threshold, Virtual Services hosted on this SE may be rebalanced to other SEs to reduce load. A new SE may be created as part of this process. (Units: PERCENT) (Default: 80)"),
required=False,
update_allowed=True,
)
min_cpu_usage_schema = properties.Schema(
properties.Schema.NUMBER,
_("When CPU usage on an SE falls below the minimum threshold, Virtual Services hosted on the SE may be consolidated onto other underutilized SEs. After consolidation, unused Service Engines may then be eligible for deletion. (Units: PERCENT) (Default: 30)"),
required=False,
update_allowed=True,
)
se_deprovision_delay_schema = properties.Schema(
properties.Schema.NUMBER,
_("Duration to preserve unused Service Engine virtual machines before deleting them. If traffic to a Virtual Service were to spike up abruptly, this SE would still be available to be utilized again rather than creating a new SE. If this value is set to 0, Controller will never delete any SEs and administrator has to manually cleanup unused SEs. (Units: MIN) (Default: 120)"),
required=False,
update_allowed=True,
)
auto_rebalance_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("If set, Virtual Services will be automatically migrated when load on an SE is less than minimum or more than maximum thresholds. Only Alerts are generated when the auto_rebalance is not set. (Default: False)"),
required=False,
update_allowed=True,
)
se_name_prefix_schema = properties.Schema(
properties.Schema.STRING,
_("Prefix to use for virtual machine name of Service Engines."),
required=False,
update_allowed=True,
)
vs_host_redundancy_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Ensure primary and secondary Service Engines are deployed on different physical hosts. (Default: True)"),
required=False,
update_allowed=True,
)
vcenter_folder_schema = properties.Schema(
properties.Schema.STRING,
_("Folder to place all the Service Engine virtual machines in vCenter."),
required=False,
update_allowed=True,
)
vcenter_datastores_item_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=VcenterDatastore.properties_schema,
required=True,
update_allowed=False,
)
vcenter_datastores_schema = properties.Schema(
properties.Schema.LIST,
_(""),
schema=vcenter_datastores_item_schema,
required=False,
update_allowed=True,
)
vcenter_datastores_include_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
vcenter_datastore_mode_schema = properties.Schema(
properties.Schema.STRING,
_(" (Default: VCENTER_DATASTORE_ANY)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['VCENTER_DATASTORE_ANY', 'VCENTER_DATASTORE_LOCAL', 'VCENTER_DATASTORE_SHARED']),
],
)
vcenter_clusters_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=VcenterClusters.properties_schema,
required=False,
update_allowed=True,
)
vcenter_hosts_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=VcenterHosts.properties_schema,
required=False,
update_allowed=True,
)
openstack_availability_zone_schema = properties.Schema(
properties.Schema.STRING,
_("(Deprecated in: 17.1.1) "),
required=False,
update_allowed=True,
)
cpu_reserve_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: False)"),
required=False,
update_allowed=True,
)
mem_reserve_schema = properties.Schema(
properties.Schema.BOOLEAN,
_(" (Default: True)"),
required=False,
update_allowed=True,
)
mgmt_network_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Management network to use for Avi Service Engines You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
mgmt_subnet_schema = properties.Schema(
properties.Schema.MAP,
_("Management subnet to use for Avi Service Engines"),
schema=IpAddrPrefix.properties_schema,
required=False,
update_allowed=True,
)
ha_mode_schema = properties.Schema(
properties.Schema.STRING,
_("High Availability mode for all the Virtual Services using this Service Engine group. (Default: HA_MODE_SHARED)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['HA_MODE_LEGACY_ACTIVE_STANDBY', 'HA_MODE_SHARED', 'HA_MODE_SHARED_PAIR']),
],
)
algo_schema = properties.Schema(
properties.Schema.STRING,
_("In compact placement, Virtual Services are placed on existing SEs until max_vs_per_se limit is reached. (Default: PLACEMENT_ALGO_PACKED)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['PLACEMENT_ALGO_DISTRIBUTED', 'PLACEMENT_ALGO_PACKED']),
],
)
buffer_se_schema = properties.Schema(
properties.Schema.NUMBER,
_("Excess Service Engine capacity provisioned for HA failover (Default: 1)"),
required=False,
update_allowed=True,
)
active_standby_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Service Engines in active/standby mode for HA failover (Default: False)"),
required=False,
update_allowed=True,
)
placement_mode_schema = properties.Schema(
properties.Schema.STRING,
_("If placement mode is 'Auto', Virtual Services are automatically placed on Service Engines. (Default: PLACEMENT_MODE_AUTO)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['PLACEMENT_MODE_AUTO']),
],
)
openstack_mgmt_network_name_schema = properties.Schema(
properties.Schema.STRING,
_("Avi Management network name"),
required=False,
update_allowed=True,
)
openstack_mgmt_network_uuid_schema = properties.Schema(
properties.Schema.STRING,
_("Management network UUID"),
required=False,
update_allowed=True,
)
instance_flavor_schema = properties.Schema(
properties.Schema.STRING,
_("Instance/Flavor type for SE instance"),
required=False,
update_allowed=True,
)
hypervisor_schema = properties.Schema(
properties.Schema.STRING,
_("Override default hypervisor"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['DEFAULT', 'KVM', 'VMWARE_ESX', 'VMWARE_VSAN', 'XEN']),
],
)
se_dos_profile_schema = properties.Schema(
properties.Schema.MAP,
_(""),
schema=DosThresholdProfile.properties_schema,
required=False,
update_allowed=True,
)
auto_rebalance_interval_schema = properties.Schema(
properties.Schema.NUMBER,
_("Frequency of rebalance, if 'Auto rebalance' is enabled (Units: SEC) (Default: 300)"),
required=False,
update_allowed=True,
)
aggressive_failure_detection_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable aggressive failover configuration for ha. (Default: False)"),
required=False,
update_allowed=True,
)
realtime_se_metrics_schema = properties.Schema(
properties.Schema.MAP,
_("Enable or disable real time SE metrics"),
schema=MetricsRealTimeUpdate.properties_schema,
required=False,
update_allowed=True,
)
vs_scaleout_timeout_schema = properties.Schema(
properties.Schema.NUMBER,
_("Time to wait for the scaled out SE to become ready before marking the scaleout done (Units: SEC) (Default: 30)"),
required=False,
update_allowed=True,
)
vs_scalein_timeout_schema = properties.Schema(
properties.Schema.NUMBER,
_("Time to wait for the scaled in SE to drain existing flows before marking the scalein done (Units: SEC) (Default: 30)"),
required=False,
update_allowed=True,
)
hardwaresecuritymodulegroup_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(" You can either provide UUID or provide a name with the prefix 'get_avi_uuid_by_name:', e.g., 'get_avi_uuid_by_name:my_obj_name'."),
required=False,
update_allowed=True,
)
connection_memory_percentage_schema = properties.Schema(
properties.Schema.NUMBER,
_("Percentage of memory for connection state. This will come at the expense of memory used for HTTP in-memory cache. (Units: PERCENT) (Default: 50)"),
required=False,
update_allowed=True,
)
extra_config_multiplier_schema = properties.Schema(
properties.Schema.NUMBER,
_("Multiplier for extra config to support large VS/Pool config. (Default: 0.0)"),
required=False,
update_allowed=True,
)
vs_scalein_timeout_for_upgrade_schema = properties.Schema(
properties.Schema.NUMBER,
_("During SE upgrade, Time to wait for the scaled-in SE to drain existing flows before marking the scalein done (Units: SEC) (Default: 30)"),
required=False,
update_allowed=True,
)
host_attribute_key_schema = properties.Schema(
properties.Schema.STRING,
_("Key of a (Key, Value) pair identifying a label for a set of Nodes usually in Container Clouds. Needs to be specified together with host_attribute_value. SEs can be configured differently including HA modes across different SE Groups. May also be used for isolation between different classes of VirtualServices. VirtualServices' SE Group may be specified via annotations/labels. A OpenShift/Kubernetes namespace maybe annotated with a matching SE Group label as openshift.io/node-selector: apptype=prod. When multiple SE Groups are used in a Cloud with host attributes specified,just a single SE Group can exist as a match-all SE Group without a host_attribute_key."),
required=False,
update_allowed=True,
)
host_attribute_value_schema = properties.Schema(
properties.Schema.STRING,
_("Value of a (Key, Value) pair identifying a label for a set of Nodes usually in Container Clouds. Needs to be specified together with host_attribute_key."),
required=False,
update_allowed=True,
)
log_disksz_schema = properties.Schema(
properties.Schema.NUMBER,
_("Maximum disk capacity (in MB) to be allocated to an SE. This is exclusively used for debug and log data. (Units: MB) (Default: 10000)"),
required=False,
update_allowed=True,
)
os_reserved_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("Amount of extra memory to be reserved for use by the Operating System on a Service Engine. (Units: MB) (Default: 0)"),
required=False,
update_allowed=True,
)
floating_intf_ip_item_schema = properties.Schema(
properties.Schema.MAP,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 1 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=IpAddr.properties_schema,
required=True,
update_allowed=False,
)
floating_intf_ip_schema = properties.Schema(
properties.Schema.LIST,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 1 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=floating_intf_ip_item_schema,
required=False,
update_allowed=True,
)
hm_on_standby_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable active health monitoring from the standby SE for all placed virtual services. (Default: True)"),
required=False,
update_allowed=True,
)
per_app_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Per-app SE mode is designed for deploying dedicated load balancers per app (VS). In this mode, each SE is limited to a max of 2 VSs. vCPUs in per-app SEs count towards licensing usage at 25% rate. (Default: False)"),
required=False,
update_allowed=True,
)
enable_vmac_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use Virtual MAC address for interfaces on which floating interface IPs are placed (Default: False)"),
required=False,
update_allowed=True,
)
distribute_load_active_standby_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Use both the active and standby Service Engines for Virtual Service placement in the legacy active standby HA mode. (Default: False)"),
required=False,
update_allowed=True,
)
auto_redistribute_active_standby_load_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Redistribution of virtual services from the takeover SE to the replacement SE can cause momentary traffic loss. If the auto-redistribute load option is left in its default off state, any desired rebalancing requires calls to REST API. (Default: False)"),
required=False,
update_allowed=True,
)
floating_intf_ip_se_2_item_schema = properties.Schema(
properties.Schema.MAP,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 2 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=IpAddr.properties_schema,
required=True,
update_allowed=False,
)
floating_intf_ip_se_2_schema = properties.Schema(
properties.Schema.LIST,
_("If ServiceEngineGroup is configured for Legacy 1+1 Active Standby HA Mode, Floating IP's will be advertised only by the Active SE in the Pair. Virtual Services in this group must be disabled/enabled for any changes to the Floating IP's to take effect. Only active SE hosting VS tagged with Active Standby SE 2 Tag will advertise this floating IP when manual load distribution is enabled."),
schema=floating_intf_ip_se_2_item_schema,
required=False,
update_allowed=True,
)
custom_tag_item_schema = properties.Schema(
properties.Schema.MAP,
_("Custom tag will be used to create the tags for SE instance in AWS. Note this is not the same as the prefix for SE name"),
schema=CustomTag.properties_schema,
required=True,
update_allowed=False,
)
custom_tag_schema = properties.Schema(
properties.Schema.LIST,
_("Custom tag will be used to create the tags for SE instance in AWS. Note this is not the same as the prefix for SE name"),
schema=custom_tag_item_schema,
required=False,
update_allowed=True,
)
dedicated_dispatcher_core_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Dedicate the core that handles packet receive/transmit from the network to just the dispatching function. Don't use it for TCP/IP and SSL functions. (Default: False)"),
required=False,
update_allowed=True,
)
cpu_socket_affinity_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Allocate all the CPU cores for the Service Engine Virtual Machines on the same CPU socket. Applicable only for vCenter Cloud. (Default: False)"),
required=False,
update_allowed=True,
)
num_flow_cores_sum_changes_to_ignore_schema = properties.Schema(
properties.Schema.NUMBER,
_("Number of changes in num flow cores sum to ignore. (Default: 8)"),
required=False,
update_allowed=True,
)
least_load_core_selection_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Select core with least load for new flow. (Default: True)"),
required=False,
update_allowed=True,
)
extra_shared_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Extra config memory to support large Geo DB configuration. (Units: MB) (Default: 0)"),
required=False,
update_allowed=True,
)
se_tunnel_mode_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Determines if DSR from secondary SE is active or not: 0: Automatically determine based on hypervisor type. 1: Disable DSR unconditionally. ~[0,1]: Enable DSR unconditionally. (Default: 0)"),
required=False,
update_allowed=True,
)
openstack_availability_zones_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.1) "),
required=True,
update_allowed=False,
)
openstack_availability_zones_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.1) "),
schema=openstack_availability_zones_item_schema,
required=False,
update_allowed=True,
)
service_ip_subnets_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.1.1) Subnets assigned to the SE group. Required for VS group placement."),
schema=IpAddrPrefix.properties_schema,
required=True,
update_allowed=False,
)
service_ip_subnets_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.1) Subnets assigned to the SE group. Required for VS group placement."),
schema=service_ip_subnets_item_schema,
required=False,
update_allowed=True,
)
se_vs_hb_max_vs_in_pkt_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Maximum number of virtualservices for which heartbeat messages are aggregated in one packet. (Default: 256)"),
required=False,
update_allowed=True,
)
se_vs_hb_max_pkts_in_batch_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.1) Maximum number of aggregated vs heartbeat packets to send in a batch. (Default: 8)"),
required=False,
update_allowed=True,
)
auto_rebalance_criteria_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.3) Set of criteria for SE Auto Rebalance."),
required=True,
update_allowed=False,
constraints=[
constraints.AllowedValues(['SE_AUTO_REBALANCE_CPS', 'SE_AUTO_REBALANCE_CPU', 'SE_AUTO_REBALANCE_MBPS', 'SE_AUTO_REBALANCE_OPEN_CONNS', 'SE_AUTO_REBALANCE_PPS']),
],
)
auto_rebalance_criteria_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.2.3) Set of criteria for SE Auto Rebalance."),
schema=auto_rebalance_criteria_item_schema,
required=False,
update_allowed=True,
)
cloud_uuid_schema = properties.Schema(
properties.Schema.STRING,
_(""),
required=False,
update_allowed=False,
)
iptables_item_schema = properties.Schema(
properties.Schema.MAP,
_("Iptable Rules"),
schema=IptableRuleSet.properties_schema,
required=True,
update_allowed=False,
)
iptables_schema = properties.Schema(
properties.Schema.LIST,
_("Iptable Rules"),
schema=iptables_item_schema,
required=False,
update_allowed=True,
)
enable_routing_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Enable routing for this ServiceEngineGroup (Default: False)"),
required=False,
update_allowed=True,
)
advertise_backend_networks_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("Advertise reach-ability of backend server networks via ADC through BGP for default gateway feature. (Default: False)"),
required=False,
update_allowed=True,
)
enable_vip_on_all_interfaces_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.1.1) Enable VIP on all interfaces of SE. (Default: True)"),
required=False,
update_allowed=True,
)
se_thread_multiplier_schema = properties.Schema(
properties.Schema.NUMBER,
_("Multiplier for SE threads based on vCPU. (Default: 1)"),
required=False,
update_allowed=True,
)
async_ssl_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("SSL handshakes will be handled by dedicated SSL Threads (Default: False)"),
required=False,
update_allowed=True,
)
async_ssl_threads_schema = properties.Schema(
properties.Schema.NUMBER,
_("Number of Async SSL threads per se_dp (Default: 1)"),
required=False,
update_allowed=True,
)
se_udp_encap_ipc_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.2) Determines if SE-SE IPC messages are encapsulated in an UDP header: 0: Automatically determine based on hypervisor type. 1: Use UDP encap unconditionally. ~[0,1]: Don't use UDP encap. (Default: 0)"),
required=False,
update_allowed=True,
)
se_ipc_udp_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.2) UDP Port for SE_DP IPC in Docker bridge mode. (Default: 1500)"),
required=False,
update_allowed=True,
)
se_remote_punt_udp_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.2) UDP Port for punted packets in Docker bridge mode. (Default: 1501)"),
required=False,
update_allowed=True,
)
se_tunnel_udp_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) UDP Port for tunneled packets from secondary to primary SE in Docker bridge mode. (Default: 1550)"),
required=False,
update_allowed=True,
)
custom_securitygroups_mgmt_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with management vNic for SE instances in OpenStack and AWS Clouds."),
required=True,
update_allowed=False,
)
custom_securitygroups_mgmt_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with management vNic for SE instances in OpenStack and AWS Clouds."),
schema=custom_securitygroups_mgmt_item_schema,
required=False,
update_allowed=True,
)
custom_securitygroups_data_item_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with data vNics for SE instances in OpenStack and AWS Clouds."),
required=True,
update_allowed=False,
)
custom_securitygroups_data_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.1.3) Custom Security Groups to be associated with data vNics for SE instances in OpenStack and AWS Clouds."),
schema=custom_securitygroups_data_item_schema,
required=False,
update_allowed=True,
)
archive_shm_limit_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) Amount of SE memory in GB until which shared memory is collected in core archive. (Units: GB) (Default: 8)"),
required=False,
update_allowed=True,
)
significant_log_throttle_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) This setting limits the number of significant logs generated per second per core on this SE. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"),
required=False,
update_allowed=True,
)
udf_log_throttle_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) This setting limits the number of UDF logs generated per second per core on this SE. UDF logs are generated due to the configured client log filters or the rules with logging enabled. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"),
required=False,
update_allowed=True,
)
non_significant_log_throttle_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.3) This setting limits the number of non-significant logs generated per second per core on this SE. Default is 100 logs per second. Set it to zero (0) to disable throttling. (Units: PER_SECOND) (Default: 100)"),
required=False,
update_allowed=True,
)
ingress_access_mgmt_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.5) Program SE security group ingress rules to allow SSH/ICMP management access from remote CIDR type. (Default: SG_INGRESS_ACCESS_ALL)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SG_INGRESS_ACCESS_ALL', 'SG_INGRESS_ACCESS_NONE', 'SG_INGRESS_ACCESS_VPC']),
],
)
ingress_access_data_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.1.5) Program SE security group ingress rules to allow VIP data access from remote CIDR type. (Default: SG_INGRESS_ACCESS_ALL)"),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SG_INGRESS_ACCESS_ALL', 'SG_INGRESS_ACCESS_NONE', 'SG_INGRESS_ACCESS_VPC']),
],
)
se_sb_dedicated_core_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 16.5.2, 17.1.9, 17.2.3) Sideband traffic will be handled by a dedicated core (Default: False)"),
required=False,
update_allowed=True,
)
se_probe_port_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.2) TCP port on SE where echo service will be run (Default: 7)"),
required=False,
update_allowed=True,
)
se_sb_threads_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 16.5.2, 17.1.9, 17.2.3) Number of Sideband threads per SE (Default: 1)"),
required=False,
update_allowed=True,
)
ignore_rtt_threshold_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.1.6,17.2.2) Ignore RTT samples if it is above threshold (Units: MILLISECONDS) (Default: 5000)"),
required=False,
update_allowed=True,
)
waf_mempool_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.3) Enable memory pool for WAF (Default: True)"),
required=False,
update_allowed=True,
)
waf_mempool_size_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.3) Memory pool size used for WAF (Units: KB) (Default: 64)"),
required=False,
update_allowed=True,
)
se_bandwidth_type_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.5) Select the SE bandwidth for the bandwidth license."),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['SE_BANDWIDTH_10000M', 'SE_BANDWIDTH_1000M', 'SE_BANDWIDTH_200M', 'SE_BANDWIDTH_25M', 'SE_BANDWIDTH_UNLIMITED']),
],
)
license_type_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.5) If no license type is specified then default license enforcement for the cloud type is chosen."),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['LIC_BACKEND_SERVERS', 'LIC_CORES', 'LIC_HOSTS', 'LIC_SE_BANDWIDTH', 'LIC_SOCKETS']),
],
)
license_tier_schema = properties.Schema(
properties.Schema.STRING,
_("(Introduced in: 17.2.5) Specifies the license tier which would be used. This field by default inherits the value from cloud."),
required=False,
update_allowed=True,
constraints=[
constraints.AllowedValues(['ENTERPRISE_16', 'ENTERPRISE_18']),
],
)
allow_burst_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.5) Allow SEs to be created using burst license"),
required=False,
update_allowed=True,
)
auto_rebalance_capacity_per_se_item_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.4) Capacities of SE for auto rebalance for each criteria."),
required=True,
update_allowed=False,
)
auto_rebalance_capacity_per_se_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 17.2.4) Capacities of SE for auto rebalance for each criteria."),
schema=auto_rebalance_capacity_per_se_item_schema,
required=False,
update_allowed=True,
)
host_gateway_monitor_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.4) Enable the host gateway monitor when service engine is deployed as docker container. Disabled by default. (Default: False)"),
required=False,
update_allowed=True,
)
vss_placement_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.5) Parameters to place Virtual Services on only a subset of the cores of an SE."),
schema=VssPlacement.properties_schema,
required=False,
update_allowed=True,
)
flow_table_new_syn_max_entries_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.5) Maximum number of flow table entries that have not completed TCP three-way handshake yet (Default: 0)"),
required=False,
update_allowed=True,
)
minimum_required_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.1) (Deprecated in: 18.1.2) Required available config memory to apply any configuration (Units: PERCENT)"),
required=False,
update_allowed=True,
)
disable_csum_offloads_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.1.14, 17.2.5, 18.1.1) Stop using TCP/UDP and IP checksum offload features of NICs (Default: False)"),
required=False,
update_allowed=True,
)
disable_gro_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.5, 18.1.1) Disable Generic Receive Offload (GRO) in DPDK poll-mode driver packet receive path. GRO is on by default on NICs that do not support LRO (Large Receive Offload) or do not gain performance boost from LRO. (Default: False)"),
required=False,
update_allowed=True,
)
disable_tso_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.5, 18.1.1) Disable TCP Segmentation Offload (TSO) in DPDK poll-mode driver packet transmit path. TSO is on by default on NICs that support it. (Default: False)"),
required=False,
update_allowed=True,
)
enable_hsm_priming_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.7, 18.1.1) (This is a beta feature). Enable HSM key priming. If enabled, key handles on the hsm will be synced to SE before processing client connections. (Default: False)"),
required=False,
update_allowed=True,
)
service_ip6_subnets_item_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 18.1.1) IPv6 Subnets assigned to the SE group. Required for VS group placement."),
schema=IpAddrPrefix.properties_schema,
required=True,
update_allowed=False,
)
service_ip6_subnets_schema = properties.Schema(
properties.Schema.LIST,
_("(Introduced in: 18.1.1) IPv6 Subnets assigned to the SE group. Required for VS group placement."),
schema=service_ip6_subnets_item_schema,
required=False,
update_allowed=True,
)
se_tracert_port_range_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 17.2.8) Traceroute port range"),
schema=PortRange.properties_schema,
required=False,
update_allowed=True,
)
distribute_queues_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.8) Distributes queue ownership among cores so multiple cores handle dispatcher duties. (Default: False)"),
required=False,
update_allowed=True,
)
additional_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.1) (Deprecated in: 18.1.2) Indicates the percent of config memory used for config updates. (Units: PERCENT)"),
required=False,
update_allowed=True,
)
vss_placement_enabled_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 18.1.1) If set, Virtual Services will be placed on only a subset of the cores of an SE. (Default: False)"),
required=False,
update_allowed=True,
)
enable_multi_lb_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 17.2.10, 18.1.2) Applicable only for Azure cloud with Basic SKU LB. If set, additional Azure LBs will be automatically created if resources in existing LB are exhausted. (Default: False)"),
required=False,
update_allowed=True,
)
n_log_streaming_threads_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.12, 18.1.2) Number of threads to use for log streaming. (Default: 1)"),
required=False,
update_allowed=True,
)
free_list_size_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.10) Number of entries in the free list (Default: 1024)"),
required=False,
update_allowed=True,
)
max_rules_per_lb_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.12, 18.1.2) Applicable to Azure platform only. Maximum number of rules per Azure LB. (Default: 150)"),
required=False,
update_allowed=True,
)
max_public_ips_per_lb_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 17.2.12, 18.1.2) Applicable to Azure platform only. Maximum number of public IPs per Azure LB. (Default: 30)"),
required=False,
update_allowed=True,
)
waf_learning_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Amount of memory reserved on SE for WAF learning. This can be atmost 5% of SE memory. (Units: MB) (Default: 0)"),
required=False,
update_allowed=True,
)
waf_learning_interval_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Frequency with which SE publishes WAF learning. (Units: MIN) (Default: 10)"),
required=False,
update_allowed=True,
)
self_se_election_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 18.1.2) Enable SEs to elect a primary amongst themselves in the absence of a connectivity to controller. (Default: False)"),
required=False,
update_allowed=True,
)
vip_asg_schema = properties.Schema(
properties.Schema.MAP,
_("(Introduced in: 18.1.2) When vip_asg is set, Vip configuration will be managed by Avi.User will be able to configure vip_asg or Vips individually at the time of create."),
schema=VipAutoscaleGroup.properties_schema,
required=False,
update_allowed=True,
)
minimum_connection_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Indicates the percent of memory reserved for connections. (Units: PERCENT) (Default: 20)"),
required=False,
update_allowed=True,
)
shm_minimum_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Minimum required shared memory to apply any configuration. (Units: MB) (Default: 4)"),
required=False,
update_allowed=True,
)
heap_minimum_config_memory_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Minimum required heap memory to apply any configuration. (Units: MB) (Default: 8)"),
required=False,
update_allowed=True,
)
disable_se_memory_check_schema = properties.Schema(
properties.Schema.BOOLEAN,
_("(Introduced in: 18.1.2) If set, disable the config memory check done in service engine. (Default: False)"),
required=False,
update_allowed=True,
)
memory_for_config_update_schema = properties.Schema(
properties.Schema.NUMBER,
_("(Introduced in: 18.1.2) Indicates the percent of memory reserved for config updates. (Units: PERCENT) (Default: 15)"),
required=False,
update_allowed=True,
)
# properties list
PROPERTIES = (
'avi_version',
'name',
'description',
'max_vs_per_se',
'min_scaleout_per_vs',
'max_scaleout_per_vs',
'max_se',
'vcpus_per_se',
'memory_per_se',
'disk_per_se',
'max_cpu_usage',
'min_cpu_usage',
'se_deprovision_delay',
'auto_rebalance',
'se_name_prefix',
'vs_host_redundancy',
'vcenter_folder',
'vcenter_datastores',
'vcenter_datastores_include',
'vcenter_datastore_mode',
'vcenter_clusters',
'vcenter_hosts',
'openstack_availability_zone',
'cpu_reserve',
'mem_reserve',
'mgmt_network_uuid',
'mgmt_subnet',
'ha_mode',
'algo',
'buffer_se',
'active_standby',
'placement_mode',
'openstack_mgmt_network_name',
'openstack_mgmt_network_uuid',
'instance_flavor',
'hypervisor',
'se_dos_profile',
'auto_rebalance_interval',
'aggressive_failure_detection',
'realtime_se_metrics',
'vs_scaleout_timeout',
'vs_scalein_timeout',
'hardwaresecuritymodulegroup_uuid',
'connection_memory_percentage',
'extra_config_multiplier',
'vs_scalein_timeout_for_upgrade',
'host_attribute_key',
'host_attribute_value',
'log_disksz',
'os_reserved_memory',
'floating_intf_ip',
'hm_on_standby',
'per_app',
'enable_vmac',
'distribute_load_active_standby',
'auto_redistribute_active_standby_load',
'floating_intf_ip_se_2',
'custom_tag',
'dedicated_dispatcher_core',
'cpu_socket_affinity',
'num_flow_cores_sum_changes_to_ignore',
'least_load_core_selection',
'extra_shared_config_memory',
'se_tunnel_mode',
'openstack_availability_zones',
'service_ip_subnets',
'se_vs_hb_max_vs_in_pkt',
'se_vs_hb_max_pkts_in_batch',
'auto_rebalance_criteria',
'cloud_uuid',
'iptables',
'enable_routing',
'advertise_backend_networks',
'enable_vip_on_all_interfaces',
'se_thread_multiplier',
'async_ssl',
'async_ssl_threads',
'se_udp_encap_ipc',
'se_ipc_udp_port',
'se_remote_punt_udp_port',
'se_tunnel_udp_port',
'custom_securitygroups_mgmt',
'custom_securitygroups_data',
'archive_shm_limit',
'significant_log_throttle',
'udf_log_throttle',
'non_significant_log_throttle',
'ingress_access_mgmt',
'ingress_access_data',
'se_sb_dedicated_core',
'se_probe_port',
'se_sb_threads',
'ignore_rtt_threshold',
'waf_mempool',
'waf_mempool_size',
'se_bandwidth_type',
'license_type',
'license_tier',
'allow_burst',
'auto_rebalance_capacity_per_se',
'host_gateway_monitor',
'vss_placement',
'flow_table_new_syn_max_entries',
'minimum_required_config_memory',
'disable_csum_offloads',
'disable_gro',
'disable_tso',
'enable_hsm_priming',
'service_ip6_subnets',
'se_tracert_port_range',
'distribute_queues',
'additional_config_memory',
'vss_placement_enabled',
'enable_multi_lb',
'n_log_streaming_threads',
'free_list_size',
'max_rules_per_lb',
'max_public_ips_per_lb',
'waf_learning_memory',
'waf_learning_interval',
'self_se_election',
'vip_asg',
'minimum_connection_memory',
'shm_minimum_config_memory',
'heap_minimum_config_memory',
'disable_se_memory_check',
'memory_for_config_update',
)
# mapping of properties to their schemas
properties_schema = {
'avi_version': avi_version_schema,
'name': name_schema,
'description': description_schema,
'max_vs_per_se': max_vs_per_se_schema,
'min_scaleout_per_vs': min_scaleout_per_vs_schema,
'max_scaleout_per_vs': max_scaleout_per_vs_schema,
'max_se': max_se_schema,
'vcpus_per_se': vcpus_per_se_schema,
'memory_per_se': memory_per_se_schema,
'disk_per_se': disk_per_se_schema,
'max_cpu_usage': max_cpu_usage_schema,
'min_cpu_usage': min_cpu_usage_schema,
'se_deprovision_delay': se_deprovision_delay_schema,
'auto_rebalance': auto_rebalance_schema,
'se_name_prefix': se_name_prefix_schema,
'vs_host_redundancy': vs_host_redundancy_schema,
'vcenter_folder': vcenter_folder_schema,
'vcenter_datastores': vcenter_datastores_schema,
'vcenter_datastores_include': vcenter_datastores_include_schema,
'vcenter_datastore_mode': vcenter_datastore_mode_schema,
'vcenter_clusters': vcenter_clusters_schema,
'vcenter_hosts': vcenter_hosts_schema,
'openstack_availability_zone': openstack_availability_zone_schema,
'cpu_reserve': cpu_reserve_schema,
'mem_reserve': mem_reserve_schema,
'mgmt_network_uuid': mgmt_network_uuid_schema,
'mgmt_subnet': mgmt_subnet_schema,
'ha_mode': ha_mode_schema,
'algo': algo_schema,
'buffer_se': buffer_se_schema,
'active_standby': active_standby_schema,
'placement_mode': placement_mode_schema,
'openstack_mgmt_network_name': openstack_mgmt_network_name_schema,
'openstack_mgmt_network_uuid': openstack_mgmt_network_uuid_schema,
'instance_flavor': instance_flavor_schema,
'hypervisor': hypervisor_schema,
'se_dos_profile': se_dos_profile_schema,
'auto_rebalance_interval': auto_rebalance_interval_schema,
'aggressive_failure_detection': aggressive_failure_detection_schema,
'realtime_se_metrics': realtime_se_metrics_schema,
'vs_scaleout_timeout': vs_scaleout_timeout_schema,
'vs_scalein_timeout': vs_scalein_timeout_schema,
'hardwaresecuritymodulegroup_uuid': hardwaresecuritymodulegroup_uuid_schema,
'connection_memory_percentage': connection_memory_percentage_schema,
'extra_config_multiplier': extra_config_multiplier_schema,
'vs_scalein_timeout_for_upgrade': vs_scalein_timeout_for_upgrade_schema,
'host_attribute_key': host_attribute_key_schema,
'host_attribute_value': host_attribute_value_schema,
'log_disksz': log_disksz_schema,
'os_reserved_memory': os_reserved_memory_schema,
'floating_intf_ip': floating_intf_ip_schema,
'hm_on_standby': hm_on_standby_schema,
'per_app': per_app_schema,
'enable_vmac': enable_vmac_schema,
'distribute_load_active_standby': distribute_load_active_standby_schema,
'auto_redistribute_active_standby_load': auto_redistribute_active_standby_load_schema,
'floating_intf_ip_se_2': floating_intf_ip_se_2_schema,
'custom_tag': custom_tag_schema,
'dedicated_dispatcher_core': dedicated_dispatcher_core_schema,
'cpu_socket_affinity': cpu_socket_affinity_schema,
'num_flow_cores_sum_changes_to_ignore': num_flow_cores_sum_changes_to_ignore_schema,
'least_load_core_selection': least_load_core_selection_schema,
'extra_shared_config_memory': extra_shared_config_memory_schema,
'se_tunnel_mode': se_tunnel_mode_schema,
'openstack_availability_zones': openstack_availability_zones_schema,
'service_ip_subnets': service_ip_subnets_schema,
'se_vs_hb_max_vs_in_pkt': se_vs_hb_max_vs_in_pkt_schema,
'se_vs_hb_max_pkts_in_batch': se_vs_hb_max_pkts_in_batch_schema,
'auto_rebalance_criteria': auto_rebalance_criteria_schema,
'cloud_uuid': cloud_uuid_schema,
'iptables': iptables_schema,
'enable_routing': enable_routing_schema,
'advertise_backend_networks': advertise_backend_networks_schema,
'enable_vip_on_all_interfaces': enable_vip_on_all_interfaces_schema,
'se_thread_multiplier': se_thread_multiplier_schema,
'async_ssl': async_ssl_schema,
'async_ssl_threads': async_ssl_threads_schema,
'se_udp_encap_ipc': se_udp_encap_ipc_schema,
'se_ipc_udp_port': se_ipc_udp_port_schema,
'se_remote_punt_udp_port': se_remote_punt_udp_port_schema,
'se_tunnel_udp_port': se_tunnel_udp_port_schema,
'custom_securitygroups_mgmt': custom_securitygroups_mgmt_schema,
'custom_securitygroups_data': custom_securitygroups_data_schema,
'archive_shm_limit': archive_shm_limit_schema,
'significant_log_throttle': significant_log_throttle_schema,
'udf_log_throttle': udf_log_throttle_schema,
'non_significant_log_throttle': non_significant_log_throttle_schema,
'ingress_access_mgmt': ingress_access_mgmt_schema,
'ingress_access_data': ingress_access_data_schema,
'se_sb_dedicated_core': se_sb_dedicated_core_schema,
'se_probe_port': se_probe_port_schema,
'se_sb_threads': se_sb_threads_schema,
'ignore_rtt_threshold': ignore_rtt_threshold_schema,
'waf_mempool': waf_mempool_schema,
'waf_mempool_size': waf_mempool_size_schema,
'se_bandwidth_type': se_bandwidth_type_schema,
'license_type': license_type_schema,
'license_tier': license_tier_schema,
'allow_burst': allow_burst_schema,
'auto_rebalance_capacity_per_se': auto_rebalance_capacity_per_se_schema,
'host_gateway_monitor': host_gateway_monitor_schema,
'vss_placement': vss_placement_schema,
'flow_table_new_syn_max_entries': flow_table_new_syn_max_entries_schema,
'minimum_required_config_memory': minimum_required_config_memory_schema,
'disable_csum_offloads': disable_csum_offloads_schema,
'disable_gro': disable_gro_schema,
'disable_tso': disable_tso_schema,
'enable_hsm_priming': enable_hsm_priming_schema,
'service_ip6_subnets': service_ip6_subnets_schema,
'se_tracert_port_range': se_tracert_port_range_schema,
'distribute_queues': distribute_queues_schema,
'additional_config_memory': additional_config_memory_schema,
'vss_placement_enabled': vss_placement_enabled_schema,
'enable_multi_lb': enable_multi_lb_schema,
'n_log_streaming_threads': n_log_streaming_threads_schema,
'free_list_size': free_list_size_schema,
'max_rules_per_lb': max_rules_per_lb_schema,
'max_public_ips_per_lb': max_public_ips_per_lb_schema,
'waf_learning_memory': waf_learning_memory_schema,
'waf_learning_interval': waf_learning_interval_schema,
'self_se_election': self_se_election_schema,
'vip_asg': vip_asg_schema,
'minimum_connection_memory': minimum_connection_memory_schema,
'shm_minimum_config_memory': shm_minimum_config_memory_schema,
'heap_minimum_config_memory': heap_minimum_config_memory_schema,
'disable_se_memory_check': disable_se_memory_check_schema,
'memory_for_config_update': memory_for_config_update_schema,
}
# for supporting get_avi_uuid_by_name functionality
field_references = {
'iptables': getattr(IptableRuleSet, 'field_references', {}),
'floating_intf_ip_se_2': getattr(IpAddr, 'field_references', {}),
'hardwaresecuritymodulegroup_uuid': 'hardwaresecuritymodulegroup',
'vcenter_hosts': getattr(VcenterHosts, 'field_references', {}),
'custom_tag': getattr(CustomTag, 'field_references', {}),
'service_ip_subnets': getattr(IpAddrPrefix, 'field_references', {}),
'mgmt_network_uuid': 'network',
'vcenter_datastores': getattr(VcenterDatastore, 'field_references', {}),
'mgmt_subnet': getattr(IpAddrPrefix, 'field_references', {}),
'vip_asg': getattr(VipAutoscaleGroup, 'field_references', {}),
'service_ip6_subnets': getattr(IpAddrPrefix, 'field_references', {}),
'floating_intf_ip': getattr(IpAddr, 'field_references', {}),
'se_tracert_port_range': getattr(PortRange, 'field_references', {}),
'vcenter_clusters': getattr(VcenterClusters, 'field_references', {}),
'se_dos_profile': getattr(DosThresholdProfile, 'field_references', {}),
'realtime_se_metrics': getattr(MetricsRealTimeUpdate, 'field_references', {}),
'vss_placement': getattr(VssPlacement, 'field_references', {}),
}
unique_keys = {
'iptables': getattr(IptableRuleSet, 'unique_keys', {}),
'floating_intf_ip_se_2': getattr(IpAddr, 'unique_keys', {}),
'vcenter_hosts': getattr(VcenterHosts, 'unique_keys', {}),
'custom_tag': getattr(CustomTag, 'unique_keys', {}),
'service_ip_subnets': getattr(IpAddrPrefix, 'unique_keys', {}),
'realtime_se_metrics': getattr(MetricsRealTimeUpdate, 'unique_keys', {}),
'vcenter_datastores': getattr(VcenterDatastore, 'unique_keys', {}),
'mgmt_subnet': getattr(IpAddrPrefix, 'unique_keys', {}),
'vip_asg': getattr(VipAutoscaleGroup, 'unique_keys', {}),
'service_ip6_subnets': getattr(IpAddrPrefix, 'unique_keys', {}),
'floating_intf_ip': getattr(IpAddr, 'unique_keys', {}),
'se_tracert_port_range': getattr(PortRange, 'unique_keys', {}),
'vcenter_clusters': getattr(VcenterClusters, 'unique_keys', {}),
'se_dos_profile': getattr(DosThresholdProfile, 'unique_keys', {}),
'vss_placement': getattr(VssPlacement, 'unique_keys', {}),
}
def resource_mapping():
return {
'Avi::LBaaS::ServiceEngineGroup': ServiceEngineGroup,
}
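# Illustrative only (not part of this module): a hypothetical Heat template
# snippet using the resource type registered above. Property names come from
# the PROPERTIES tuple; the values shown are made up.
#
#   resources:
#     demo_se_group:
#       type: Avi::LBaaS::ServiceEngineGroup
#       properties:
#         name: demo-se-group
#         max_se: 4
#         vcpus_per_se: 2
#         memory_per_se: 4096
#         distribute_queues: false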
| avinetworks/avi-heat | avi/heat/resources/se_group.py | Python | apache-2.0 | 63,203 |
"""
Output Format Module
Secret: The format and structure of the output data.
Service: Outputs the results of the calculations, including the input parameters, the demand, the capacity,
the probability of breakage, and both safety requirements.
"""
def display_output(filename, q, j, q_hat_tol, pb, lr, nfl, is_safe1, is_safe2, params):
f = open(filename, 'w')
for attr, value in sorted(params.__dict__.items()):
string_param = attr+"\t"+str(value)+"\n"
f.write(string_param)
f.write("Demand (q) %.15e\n" % q)
f.write("Stress Distribution Factor (J) %.15e\n" % j)
f.write("Tolerable Pressure %.15e\n" % q_hat_tol)
f.write("Probability of Breakage (Pb) %.15e\n" % pb)
f.write("Capacity (LR)%.15e\n" % lr)
f.write("Non-Factored Load (NFL)%.15e\n" % nfl)
f.write("Safety Requirement-1 %f\n" % is_safe1)
f.write("Safety Requirement-2 %f\n" % is_safe2)
if is_safe1 and is_safe2:
f.write("For the given input parameters, the glass is considered safe\n")
else:
f.write("For the given input parameters, the glass is NOT considered safe\n")
f.close()
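# Minimal usage sketch (not part of the original module): display_output only
# iterates over params.__dict__, so any simple object holding the input
# parameters will do. The attribute names and numbers below are hypothetical.
if __name__ == "__main__":
    class _DemoParams(object):
        def __init__(self):
            self.a = 1.5    # plate length (assumed units)
            self.b = 1.2    # plate width (assumed units)
            self.t = "6.0"  # nominal thickness designation
    display_output("demo_output.txt", 4.5e-3, 32.0, 5.5e-3, 1.3e-3,
                   6.2e-3, 5.9e-3, True, True, _DemoParams())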
| JacquesCarette/literate-scientific-software | Presentations/WG2_11/GlassBR_Code/original/outputFormat.py | Python | bsd-2-clause | 1,130 |
import numpy as np
def nan_rmse(A, B):
'''
Returns RMSE between two numpy arrays
'''
dat = (A - B) ** 2
mdat = np.ma.masked_array(dat, np.isnan(dat))
return np.sqrt(np.mean(mdat))
| jniznan/edami | edami/utils.py | Python | mit | 206 |
# encoding: utf-8
# module samba.dcerpc.dfs
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/dfs.so
# by generator 1.135
""" dfs DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class Info4(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
comment = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
guid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
num_stores = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
path = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
state = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
stores = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
timeout = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/dfs/Info4.py | Python | gpl-2.0 | 1,231 |
"""Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
    - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| WangWenjun559/Weiss | summary/sumy/sklearn/neighbors/unsupervised.py | Python | apache-2.0 | 4,461 |
"""distutils.command.sdist
Implements the Distutils 'sdist' command (create a source distribution)."""
__revision__ = "$Id$"
import os
import string
import sys
from glob import glob
from warnings import warn
from distutils.core import Command
from distutils import dir_util, dep_util, file_util, archive_util
from distutils.text_file import TextFile
from distutils.errors import (DistutilsPlatformError, DistutilsOptionError,
DistutilsTemplateError)
from distutils.filelist import FileList
from distutils import log
from distutils.util import convert_path
def show_formats():
"""Print all possible values for the 'formats' option (used by
the "--help-formats" command-line option).
"""
from distutils.fancy_getopt import FancyGetopt
from distutils.archive_util import ARCHIVE_FORMATS
formats = []
for format in ARCHIVE_FORMATS.keys():
formats.append(("formats=" + format, None,
ARCHIVE_FORMATS[format][2]))
formats.sort()
FancyGetopt(formats).print_help(
"List of available source distribution formats:")
class sdist(Command):
description = "create a source distribution (tarball, zip file, etc.)"
def checking_metadata(self):
"""Callable used for the check sub-command.
Placed here so user_options can view it"""
return self.metadata_check
user_options = [
('template=', 't',
"name of manifest template file [default: MANIFEST.in]"),
('manifest=', 'm',
"name of manifest file [default: MANIFEST]"),
('use-defaults', None,
"include the default file set in the manifest "
"[default; disable with --no-defaults]"),
('no-defaults', None,
"don't include the default file set"),
('prune', None,
"specifically exclude files/directories that should not be "
"distributed (build tree, RCS/CVS dirs, etc.) "
"[default; disable with --no-prune]"),
('no-prune', None,
"don't automatically exclude anything"),
('manifest-only', 'o',
"just regenerate the manifest and then stop "
"(implies --force-manifest)"),
('force-manifest', 'f',
"forcibly regenerate the manifest and carry on as usual. "
"Deprecated: now the manifest is always regenerated."),
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
('metadata-check', None,
"Ensure that all required elements of meta-data "
"are supplied. Warn if any missing. [default]"),
('owner=', 'u',
"Owner name used when creating a tar file [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file [default: current group]"),
]
boolean_options = ['use-defaults', 'prune',
'manifest-only', 'force-manifest',
'keep-temp', 'metadata-check']
help_options = [
('help-formats', None,
"list available distribution formats", show_formats),
]
negative_opt = {'no-defaults': 'use-defaults',
'no-prune': 'prune' }
default_format = {'posix': 'gztar',
'nt': 'zip' }
sub_commands = [('check', checking_metadata)]
def initialize_options(self):
# 'template' and 'manifest' are, respectively, the names of
# the manifest template and manifest file.
self.template = None
self.manifest = None
# 'use_defaults': if true, we will include the default file set
# in the manifest
self.use_defaults = 1
self.prune = 1
self.manifest_only = 0
self.force_manifest = 0
self.formats = None
self.keep_temp = 0
self.dist_dir = None
self.archive_files = None
self.metadata_check = 1
self.owner = None
self.group = None
def finalize_options(self):
if self.manifest is None:
self.manifest = "MANIFEST"
if self.template is None:
self.template = "MANIFEST.in"
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create source distributions " + \
"on platform %s" % os.name
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError, \
"unknown archive format '%s'" % bad_format
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self):
# 'filelist' contains the list of files that will make up the
# manifest
self.filelist = FileList()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Do whatever it takes to get the list of files to process
# (process the manifest template, read an existing manifest,
# whatever). File list is accumulated in 'self.filelist'.
self.get_file_list()
# If user just wanted us to regenerate the manifest, stop now.
if self.manifest_only:
return
# Otherwise, go ahead and create the source distribution tarball,
# or zipfile, or whatever.
self.make_distribution()
def check_metadata(self):
"""Deprecated API."""
warn("distutils.command.sdist.check_metadata is deprecated, \
use the check command instead", PendingDeprecationWarning)
check = self.distribution.get_command_obj('check')
check.ensure_finalized()
check.run()
def get_file_list(self):
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options.
"""
# new behavior:
        # the file list is recalculated every time because
# even if MANIFEST.in or setup.py are not changed
# the user might have added some files in the tree that
# need to be included.
#
# This makes --force the default and only behavior.
template_exists = os.path.isfile(self.template)
if not template_exists:
self.warn(("manifest template '%s' does not exist " +
"(using default file list)") %
self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = 0
for fn in alts:
if os.path.exists(fn):
got_it = 1
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
string.join(alts, ', '))
else:
if os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
if files:
self.filelist.extend(files)
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str): # plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else: # a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def read_template(self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template,
strip_comments=1,
skip_blanks=1,
join_lines=1,
lstrip_ws=1,
rstrip_ws=1,
collapse_join=1)
while 1:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
except DistutilsTemplateError, msg:
self.warn("%s, line %d: %s" % (template.filename,
template.current_line,
msg))
def prune_file_list(self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
# pruning out vcs directories
# both separators are used under win32
if sys.platform == 'win32':
seps = r'/|\\'
else:
seps = '/'
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
'_darcs']
vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
if os.path.isfile(self.manifest):
fp = open(self.manifest)
try:
first_line = fp.readline()
finally:
fp.close()
if first_line != '# file GENERATED by distutils, do NOT edit\n':
log.info("not writing to manually maintained "
"manifest file '%s'" % self.manifest)
return
content = self.filelist.files[:]
content.insert(0, '# file GENERATED by distutils, do NOT edit')
self.execute(file_util.write_file, (self.manifest, content),
"writing manifest file '%s'" % self.manifest)
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
while 1:
line = manifest.readline()
if line == '': # end of file
break
if line[-1] == '\n':
line = line[0:-1]
self.filelist.append(line)
manifest.close()
def make_release_tree(self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
# Create all the directories under 'base_dir' necessary to
# put 'files' there; the 'mkpath()' is just so we don't die
# if the manifest happens to be empty.
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
# And walk over the list of files, either making a hard link (if
# os.link exists) to each one that doesn't already exist in its
# corresponding location under 'base_dir', or copying each file
# that's out-of-date in 'base_dir'. (Usually, all files will be
# out-of-date, because by default we blow away 'base_dir' when
# we're done making the distribution archives.)
if hasattr(os, 'link'): # can make hard links on this system
link = 'hard'
msg = "making hard links in %s..." % base_dir
else: # nope, have to copy
link = None
msg = "copying files to %s..." % base_dir
if not files:
log.warn("no files to distribute -- empty manifest?")
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warn("'%s' not a regular file -- skipping" % file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
def make_distribution(self):
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
# Don't warn about missing meta-data here -- should be (and is!)
# done elsewhere.
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = [] # remember names of files we create
# tar archive must be created last to avoid overwrite and remove
if 'tar' in self.formats:
self.formats.append(self.formats.pop(self.formats.index('tar')))
for fmt in self.formats:
file = self.make_archive(base_name, fmt, base_dir=base_dir,
owner=self.owner, group=self.group)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
def get_archive_files(self):
"""Return the list of archive files created when the command
was run, or None if the command hasn't run yet.
"""
return self.archive_files
| martinbuc/missionplanner | packages/IronPython.StdLib.2.7.4/content/Lib/distutils/command/sdist.py | Python | gpl-3.0 | 18,344 |
"""Denon HEOS Media Player."""
from functools import reduce, wraps
import logging
from operator import ior
from typing import Sequence
from pyheos import HeosError, const as heos_const
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
DOMAIN,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
MEDIA_TYPE_URL,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_IDLE, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import utcnow
from .const import DATA_SOURCE_MANAGER, DOMAIN as HEOS_DOMAIN, SIGNAL_HEOS_UPDATED
BASE_SUPPORTED_FEATURES = (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_SHUFFLE_SET
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY_MEDIA
)
PLAY_STATE_TO_STATE = {
heos_const.PLAY_STATE_PLAY: STATE_PLAYING,
heos_const.PLAY_STATE_STOP: STATE_IDLE,
heos_const.PLAY_STATE_PAUSE: STATE_PAUSED,
}
CONTROL_TO_SUPPORT = {
heos_const.CONTROL_PLAY: SUPPORT_PLAY,
heos_const.CONTROL_PAUSE: SUPPORT_PAUSE,
heos_const.CONTROL_STOP: SUPPORT_STOP,
heos_const.CONTROL_PLAY_PREVIOUS: SUPPORT_PREVIOUS_TRACK,
heos_const.CONTROL_PLAY_NEXT: SUPPORT_NEXT_TRACK,
}
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
):
"""Add media players for a config entry."""
players = hass.data[HEOS_DOMAIN][DOMAIN]
devices = [HeosMediaPlayer(player) for player in players.values()]
async_add_entities(devices, True)
def log_command_error(command: str):
"""Return decorator that logs command failure."""
def decorator(func):
@wraps(func)
async def wrapper(*args, **kwargs):
try:
await func(*args, **kwargs)
except (HeosError, ValueError) as ex:
_LOGGER.error("Unable to %s: %s", command, ex)
return wrapper
return decorator
class HeosMediaPlayer(MediaPlayerEntity):
"""The HEOS player."""
def __init__(self, player):
"""Initialize."""
self._media_position_updated_at = None
self._player = player
self._signals = []
self._supported_features = BASE_SUPPORTED_FEATURES
self._source_manager = None
async def _player_update(self, player_id, event):
"""Handle player attribute updated."""
if self._player.player_id != player_id:
return
if event == heos_const.EVENT_PLAYER_NOW_PLAYING_PROGRESS:
self._media_position_updated_at = utcnow()
await self.async_update_ha_state(True)
async def _heos_updated(self):
"""Handle sources changed."""
await self.async_update_ha_state(True)
async def async_added_to_hass(self):
"""Device added to hass."""
# Update state when attributes of the player change
self._signals.append(
self._player.heos.dispatcher.connect(
heos_const.SIGNAL_PLAYER_EVENT, self._player_update
)
)
# Update state when heos changes
self._signals.append(
self.hass.helpers.dispatcher.async_dispatcher_connect(
SIGNAL_HEOS_UPDATED, self._heos_updated
)
)
@log_command_error("clear playlist")
async def async_clear_playlist(self):
"""Clear players playlist."""
await self._player.clear_queue()
@log_command_error("pause")
async def async_media_pause(self):
"""Send pause command."""
await self._player.pause()
@log_command_error("play")
async def async_media_play(self):
"""Send play command."""
await self._player.play()
@log_command_error("move to previous track")
async def async_media_previous_track(self):
"""Send previous track command."""
await self._player.play_previous()
@log_command_error("move to next track")
async def async_media_next_track(self):
"""Send next track command."""
await self._player.play_next()
@log_command_error("stop")
async def async_media_stop(self):
"""Send stop command."""
await self._player.stop()
@log_command_error("set mute")
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self._player.set_mute(mute)
@log_command_error("play media")
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
if media_type in (MEDIA_TYPE_URL, MEDIA_TYPE_MUSIC):
await self._player.play_url(media_id)
return
if media_type == "quick_select":
# media_id may be an int or a str
selects = await self._player.get_quick_selects()
try:
index = int(media_id)
except ValueError:
# Try finding index by name
index = next(
(index for index, select in selects.items() if select == media_id),
None,
)
if index is None:
raise ValueError(f"Invalid quick select '{media_id}'")
await self._player.play_quick_select(index)
return
if media_type == MEDIA_TYPE_PLAYLIST:
playlists = await self._player.heos.get_playlists()
playlist = next((p for p in playlists if p.name == media_id), None)
if not playlist:
raise ValueError(f"Invalid playlist '{media_id}'")
add_queue_option = (
heos_const.ADD_QUEUE_ADD_TO_END
if kwargs.get(ATTR_MEDIA_ENQUEUE)
else heos_const.ADD_QUEUE_REPLACE_AND_PLAY
)
await self._player.add_to_queue(playlist, add_queue_option)
return
if media_type == "favorite":
# media_id may be an int or str
try:
index = int(media_id)
except ValueError:
# Try finding index by name
index = next(
(
index
for index, favorite in self._source_manager.favorites.items()
if favorite.name == media_id
),
None,
)
if index is None:
raise ValueError(f"Invalid favorite '{media_id}'")
await self._player.play_favorite(index)
return
raise ValueError(f"Unsupported media type '{media_type}'")
@log_command_error("select source")
async def async_select_source(self, source):
"""Select input source."""
await self._source_manager.play_source(source, self._player)
@log_command_error("set shuffle")
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self._player.set_play_mode(self._player.repeat, shuffle)
@log_command_error("set volume level")
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self._player.set_volume(int(volume * 100))
async def async_update(self):
"""Update supported features of the player."""
controls = self._player.now_playing_media.supported_controls
current_support = [CONTROL_TO_SUPPORT[control] for control in controls]
self._supported_features = reduce(ior, current_support, BASE_SUPPORTED_FEATURES)
if self._source_manager is None:
self._source_manager = self.hass.data[HEOS_DOMAIN][DATA_SOURCE_MANAGER]
async def async_will_remove_from_hass(self):
"""Disconnect the device when removed."""
for signal_remove in self._signals:
signal_remove()
self._signals.clear()
@property
def available(self) -> bool:
"""Return True if the device is available."""
return self._player.available
@property
def device_info(self) -> dict:
"""Get attributes about the device."""
return {
"identifiers": {(HEOS_DOMAIN, self._player.player_id)},
"name": self._player.name,
"model": self._player.model,
"manufacturer": "HEOS",
"sw_version": self._player.version,
}
@property
def device_state_attributes(self) -> dict:
"""Get additional attribute about the state."""
return {
"media_album_id": self._player.now_playing_media.album_id,
"media_queue_id": self._player.now_playing_media.queue_id,
"media_source_id": self._player.now_playing_media.source_id,
"media_station": self._player.now_playing_media.station,
"media_type": self._player.now_playing_media.type,
}
@property
def is_volume_muted(self) -> bool:
"""Boolean if volume is currently muted."""
return self._player.is_muted
@property
def media_album_name(self) -> str:
"""Album name of current playing media, music track only."""
return self._player.now_playing_media.album
@property
def media_artist(self) -> str:
"""Artist of current playing media, music track only."""
return self._player.now_playing_media.artist
@property
def media_content_id(self) -> str:
"""Content ID of current playing media."""
return self._player.now_playing_media.media_id
@property
def media_content_type(self) -> str:
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
duration = self._player.now_playing_media.duration
if isinstance(duration, int):
return duration / 1000
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
# Some media doesn't have duration but reports position, return None
if not self._player.now_playing_media.duration:
return None
return self._player.now_playing_media.current_position / 1000
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
# Some media doesn't have duration but reports position, return None
if not self._player.now_playing_media.duration:
return None
return self._media_position_updated_at
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return True
@property
def media_image_url(self) -> str:
"""Image url of current playing media."""
# May be an empty string, if so, return None
image_url = self._player.now_playing_media.image_url
return image_url if image_url else None
@property
def media_title(self) -> str:
"""Title of current playing media."""
return self._player.now_playing_media.song
@property
def name(self) -> str:
"""Return the name of the device."""
return self._player.name
@property
def should_poll(self) -> bool:
"""No polling needed for this device."""
return False
@property
def shuffle(self) -> bool:
"""Boolean if shuffle is enabled."""
return self._player.shuffle
@property
def source(self) -> str:
"""Name of the current input source."""
return self._source_manager.get_current_source(self._player.now_playing_media)
@property
def source_list(self) -> Sequence[str]:
"""List of available input sources."""
return self._source_manager.source_list
@property
def state(self) -> str:
"""State of the player."""
return PLAY_STATE_TO_STATE[self._player.state]
@property
def supported_features(self) -> int:
"""Flag media player features that are supported."""
return self._supported_features
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return str(self._player.player_id)
@property
def volume_level(self) -> float:
"""Volume level of the media player (0..1)."""
return self._player.volume / 100
| tchellomello/home-assistant | homeassistant/components/heos/media_player.py | Python | apache-2.0 | 12,782 |
# xiaolongdolly 2017.8.20
numbers = [number for number in range(1, 10)]
for output_number in numbers:
print(output_number**3)
| xiaolongdolly/Python_Course | chapter_4/cubes/cubes.py | Python | gpl-3.0 | 131 |
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import hr_department_type, hr_department
| open-synergy/opnsynid-hr | hr_department_type/models/__init__.py | Python | agpl-3.0 | 176 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid - **Memory Checker.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '22/05/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import logging
from PyQt4.QtCore import QCoreApplication
from safe_qgis.safe_interface import get_free_memory
from safe_qgis.safe_interface import messaging as m
from safe_qgis.safe_interface import DYNAMIC_MESSAGE_SIGNAL
from safe_qgis.safe_interface import styles
from third_party.pydispatch import dispatcher
PROGRESS_UPDATE_STYLE = styles.PROGRESS_UPDATE_STYLE
INFO_STYLE = styles.INFO_STYLE
WARNING_STYLE = styles.WARNING_STYLE
KEYWORD_STYLE = styles.KEYWORD_STYLE
LOGGER = logging.getLogger('InaSAFE')
def tr(string):
"""We implement this ourselves since we do not inherit QObject.
:param string: The string for translation.
:type string: str
:returns: Translated version of string.
:rtype: str
"""
return QCoreApplication.translate('MemoryChecker', string)
def send_message(message):
"""Send a message using the dispatcher.
:param message: A Message object to be sent to a message viewer.
:type message: Message
"""
dispatcher.send(
signal=DYNAMIC_MESSAGE_SIGNAL,
sender=dispatcher.Anonymous,
message=message)
def check_memory_usage(buffered_geo_extent, cell_size):
"""Helper to check if analysis is feasible when extents change.
For simplicity, we will do all our calculations in geocrs.
:param buffered_geo_extent: An extent in the for [xmin, ymin, xmax, ymax]
:type buffered_geo_extent: list
:param cell_size: The size of a cell (assumes in the X direction).
:type cell_size: float
    :returns: True if it appears we have enough memory (or we cannot compute
        it), False if it appears we do not have enough.
    :rtype: bool
    .. note:: A Message with notes about how much memory is needed for a
        single raster, and whether this is likely to result in an error, is
        sent to the message viewer rather than raised.
"""
myMessage = m.Message()
myCheckHeading = m.Heading(
tr('Checking available memory'), **PROGRESS_UPDATE_STYLE)
myMessage.add(myCheckHeading)
myWidth = buffered_geo_extent[2] - buffered_geo_extent[0]
myHeight = buffered_geo_extent[3] - buffered_geo_extent[1]
try:
myWidth = myWidth / cell_size
myHeight = myHeight / cell_size
except TypeError:
# Could have been a vector layer for example
myReason = tr(
'Computed cellsize was None. Memory check currently only works '
'for raster input layers.')
myMessage.add(myReason)
send_message(myMessage)
return True # assume enough mem since we have no vector check logic
myList = m.BulletedList()
myBullet = m.Paragraph(
m.ImportantText(tr('Width: ')), str(myWidth))
myList.add(myBullet)
myBullet = m.Paragraph(
m.ImportantText(tr('Height: ')), str(myHeight))
myList.add(myBullet)
myBullet = m.Paragraph(
m.ImportantText(tr('Cell Size: ')), str(cell_size))
myList.add(myBullet)
myMessage.add(myList)
# Compute mem requirement in MB (assuming numpy uses 8 bytes by per
# cell) see this link:
# http://stackoverflow.com/questions/11784329/
# python-memory-usage-of-numpy-arrays
# Also note that the on-disk requirement of the clipped tifs is about
# half this since the tifs as in single precision,
# whereas numpy arrays are in double precision.
myRequirement = ((myWidth * myHeight * 8) / 1024 / 1024)
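    # Worked example of the formula above: a clip of 10000 x 10000 cells needs
    # roughly 10000 * 10000 * 8 / 1024 / 1024 ~= 763 MB per double precision
    # array held in memory.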
try:
myFreeMemory = get_free_memory()
except ValueError:
myErrorHeading = m.Heading(tr('Memory check error'), **WARNING_STYLE)
myErrorMessage = tr('Could not determine free memory')
myMessage.add(myErrorHeading)
myMessage.add(myErrorMessage)
send_message(myMessage)
LOGGER.exception(myMessage)
return True # still let the user try to run their analysis
# We work on the assumption that if more than 10% of the available
# memory is occupied by a single layer we could run out of memory
# (depending on the impact function). This is because multiple
# in memory copies of the layer are often made during processing.
myWarningLimit = 10
myUsageIndicator = (float(myRequirement) / float(myFreeMemory)) * 100
myCountsMessage = tr('Memory requirement: about %d mb per raster layer ('
'%d mb available)') % (myRequirement, myFreeMemory)
myUsageMessage = tr('Memory used / available: %d/%d') % (
myUsageIndicator, myWarningLimit)
myMessage.add(myCountsMessage)
myMessage.add(myUsageMessage)
if myWarningLimit <= myUsageIndicator:
myWarningHeading = m.Heading(
tr('Potential memory issue'), **WARNING_STYLE)
myWarningMessage = tr(
'There may not be enough free memory to run this analysis. You can'
' attempt to run the analysis anyway, but note that your computer '
'may become unresponsive during execution, and / or the analysis '
'may fail due to insufficient memory. Proceed at your own risk.')
mySuggestionHeading = m.Heading(
tr('Suggestion'), **INFO_STYLE)
mySuggestion = tr(
'Try zooming in to a smaller area or using a raster layer with a '
'coarser resolution to speed up execution and reduce memory '
'requirements. You could also try adding more RAM to your '
'computer.')
myMessage.add(myWarningHeading)
myMessage.add(myWarningMessage)
myMessage.add(mySuggestionHeading)
myMessage.add(mySuggestion)
send_message(myMessage)
LOGGER.info(myMessage.to_text())
return False
send_message(myMessage)
LOGGER.info(myMessage.to_text())
return True
| danylaksono/inasafe | safe_qgis/utilities/memory_checker.py | Python | gpl-3.0 | 6,439 |
#
# Encapsulation of statistics for a 6D particle distribution.
#
#
# Python imports
import math
# SciPy imports
import numpy as np
import scipy as sp
def calcAverages6D(array6D):
return sp.average(array6D, axis=1)
def subtractAverages6D(array6D):
averages6D = calcAverages6D(array6D)
for nLoop in range(6):
array6D[nLoop,:] -= averages6D[nLoop]
def calcVariance6D(array6D):
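    # One-pass (Welford-style) accumulation of the running mean and the sum of
    # squared deviations for each of the six coordinates; the population
    # variance is returned at the end.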
npoints = array6D.shape[1]
    avgIter = array6D[:,0].copy()  # copy: the in-place updates below must not modify the caller's first column
varIter = sp.zeros(6)
for nLoop in range(1,npoints):
tmpData = array6D[:,nLoop]
tmpIter = (tmpData - avgIter)
avgIter += (tmpData - avgIter) / (nLoop+1)
varIter += (tmpData - avgIter) * tmpIter
return varIter / npoints
def calcRmsValues6D(array6D):
return sp.sqrt(calcVariance6D(array6D))
def normalizeRmsValues6D(array6D):
invRmsValues6D = 1. / calcRmsValues6D(array6D)
for nLoop in range(6):
array6D[nLoop,:] *= invRmsValues6D[nLoop]
def calcMinValues6D(array6D):
return np.min(array6D, axis=1)
def calcMaxValues6D(array6D):
return np.max(array6D, axis=1)
def calcCorrelations6D(array6D):
npoints = array6D.shape[1]
averages6D = calcAverages6D(array6D)
variance6D = calcVariance6D(array6D)
correlations6D = sp.zeros(6*6).reshape(6,6)
for iLoop in range(6):
for jLoop in range(6):
if iLoop == jLoop:
correlations6D[iLoop, jLoop] = variance6D[iLoop]
else:
for nLoop in range(npoints):
correlations6D[iLoop, jLoop] += \
(array6D[iLoop,nLoop] - averages6D[iLoop]) * \
(array6D[jLoop,nLoop] - averages6D[jLoop])
correlations6D[iLoop, jLoop] /= npoints
return correlations6D
def eraseCorrelations6D(array6D):
npoints = array6D.shape[1]
sigmaM = calcCorrelations6D(array6D)
eigVals, eigVecs = jacobiEigenSolver6D(sigmaM)
verboseCheck = 0
if verboseCheck == 1:
print 'eigVals = ', eigVals
temp6D = array6D.copy()
for iLoop in range(6):
for nLoop in range(npoints): array6D[iLoop,nLoop] = 0.0
for iLoop in range(6):
for jLoop in range(6):
for nLoop in range(npoints):
array6D[iLoop,nLoop] += eigVecs[jLoop,iLoop] * temp6D[jLoop,nLoop]
def jacobiEigenSolver6D(sigma6D):
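    # Cyclic Jacobi rotation scheme for a symmetric 6x6 matrix: off-diagonal
    # elements above a shrinking threshold are zeroed by plane rotations, which
    # are accumulated in eVecs (columns become the eigenvectors). Note that
    # sigma6D is modified in place.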
# Setup
eVecs=sp.zeros(36).reshape(6,6)
for ip in range(6): eVecs[ip,ip]=1.0
bTemp=sp.zeros(6)
zTemp=sp.zeros(6)
eVals=sp.zeros(6)
for ip in range(6):
bTemp[ip]=sigma6D[ip,ip]
eVals[ip]=sigma6D[ip,ip]
# Top of the master loop
numRotations = 0
for nMaster in range(50):
sm = 0.0
for ip in range(5):
for iq in range(ip+1,6):
sm += math.fabs(sigma6D[ip,iq])
# Check for convergence
if sm == 0.0:
return eVals, eVecs # Success!
# Convergence failed, so reset threshold
if nMaster<3:
threshold=0.2*sm/36.
else:
threshold=0.0
# Next iteration
for ip in range(5):
for iq in range(ip+1,6):
gScal=100.*math.fabs(sigma6D[ip,iq])
if nMaster>3 and math.fabs(float(eVals[ip])+gScal)==math.fabs(eVals[ip]) \
and math.fabs(float(eVals[iq])+gScal)==math.fabs(eVals[iq]):
sigma6D[ip,iq]=0.0
elif math.fabs(sigma6D[ip,iq])>threshold:
hScal=float(eVals[iq])-float(eVals[ip])
if math.fabs(hScal)+gScal==math.fabs(hScal):
tScal=float(sigma6D[ip,iq])/hScal
else:
theta=0.5*hScal/float(sigma6D[ip,iq])
tScal=1.0/(math.fabs(theta)+math.sqrt(1.0+theta**2))
if theta<0.: tScal*=-1.0
cTemp=1.0/math.sqrt(1.0+tScal**2)
sTemp=tScal*cTemp
tau=sTemp/(1.0+cTemp)
hScal=tScal*float(sigma6D[ip,iq])
zTemp[ip]-=hScal
zTemp[iq]+=hScal
eVals[ip]-=hScal
eVals[iq]+=hScal
sigma6D[ip,iq]=0.0
for jLoop in range(ip):
gScal=sigma6D[jLoop,ip]
hScal=sigma6D[jLoop,iq]
sigma6D[jLoop,ip]=gScal-sTemp*(hScal+gScal*tau)
sigma6D[jLoop,iq]=hScal+sTemp*(gScal-hScal*tau)
for jLoop in range(ip+1,iq):
gScal=sigma6D[ip,jLoop]
hScal=sigma6D[jLoop,iq]
sigma6D[ip,jLoop]=gScal-sTemp*(hScal+gScal*tau)
sigma6D[jLoop,iq]=hScal+sTemp*(gScal-hScal*tau)
for jLoop in range(iq+1,6):
gScal=sigma6D[ip,jLoop]
hScal=sigma6D[iq,jLoop]
sigma6D[ip,jLoop]=gScal-sTemp*(hScal+gScal*tau)
sigma6D[iq,jLoop]=hScal+sTemp*(gScal-hScal*tau)
for jLoop in range(6):
gScal=eVecs[jLoop,ip]
hScal=eVecs[jLoop,iq]
eVecs[jLoop,ip]=gScal-sTemp*(hScal+gScal*tau)
eVecs[jLoop,iq]=hScal+sTemp*(gScal-hScal*tau)
numRotations+=1
# Collect results before checking again for convergence
for ip in range(6):
bTemp[ip]+=zTemp[ip]
eVals[ip] =bTemp[ip]
zTemp[ip] =0.0
# No convergence after 50 iterations, so give up!
raise Exception("Too many iterations in routine jacobiEigenSolver6D")
| radiasoft/radtrack | radtrack/statistics/RbStatistics6D.py | Python | apache-2.0 | 5,823 |
from urlparse import urlparse
import requests
def userinfo(url, accept="text/html"):
o = urlparse(url)
r = requests.get('{}://{}'.format(o.scheme, o.netloc),
headers={'User-Info': o.username,
'Accept': accept})
return r
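# Minimal usage sketch (hypothetical values; this performs a real HTTP GET, so
# it assumes network access and a server that understands the User-Info
# header):
if __name__ == '__main__':
    resp = userinfo('http://[email protected]/', accept='text/html')
    print(resp.status_code)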
| BigBlueHat/userinfo | python/client/userinfo.py | Python | apache-2.0 | 287 |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import base
from cinderclient import utils
class ListExtResource(base.Resource):
@property
def summary(self):
descr = self.description.strip()
if not descr:
return '??'
lines = descr.split("\n")
if len(lines) == 1:
return lines[0]
else:
return lines[0] + "..."
class ListExtManager(base.Manager):
resource_class = ListExtResource
def show_all(self):
return self._list("/extensions", 'extensions')
@utils.service_type('volume')
def do_list_extensions(client, _args):
"""
List all the os-api extensions that are available.
"""
extensions = client.list_extensions.show_all()
fields = ["Name", "Summary", "Alias", "Updated"]
utils.print_list(extensions, fields)
| ntt-sic/python-cinderclient | cinderclient/v1/contrib/list_extensions.py | Python | apache-2.0 | 1,447 |
input()  # the first line (the element count) is not needed
liste = list(map(int, input().split()))
liste.sort()
print(liste[len(liste) // 2])  # middle element (upper median) of the sorted values
| omergulen/brainhack17 | wtf/wtf.py | Python | mit | 90 |
#!/usr/bin/env python
# coding=utf-8
from toughlib.utils import safeunicode,get_currtime
from toughwlan.manage.api.api_base import ApiHandler
from toughlib.permit import permit
from toughwlan import models
import random
@permit.route(r"/api/isp/query")
class IspQueryHandler(ApiHandler):
def get(self):
self.post()
def post(self):
try:
req_msg = self.parse_form_request()
if 'isp_code' not in req_msg:
raise ValueError("isp_code required")
except Exception as err:
self.render_result(code=1, msg=safeunicode(err.message))
return
isp = self.db.query(models.TrwIsp).filter_by(isp_code=req_msg['isp_code']).first()
        if not isp:
            self.render_result(code=1, msg="isp not exists")
        else:
            # build the response only after confirming the record exists, to
            # avoid an AttributeError when the isp_code is unknown
            ispdata = {c.name: getattr(isp, c.name) for c in isp.__table__.columns}
            self.render_result(code=0, msg="success", data=ispdata)
@permit.route(r"/api/isp/register")
class IspRegisterHandler(ApiHandler):
def next_isp_code(self):
isp_code = str(random.randint(10000000,99999999))
if self.db.query(models.TrwIsp.isp_code).filter_by(isp_code=isp_code).count() > 0:
return self.next_isp_code()
else:
return isp_code
def get(self):
self.post()
def post(self):
try:
req_msg = self.parse_form_request()
except Exception as err:
self.render_result(code=1, msg=safeunicode(err.message))
return
isp = models.TrwIsp()
isp.isp_code = self.next_isp_code()
isp.isp_name = req_msg.get("isp_name","")
isp.isp_desc = req_msg.get("isp_desc","")
isp.isp_email = req_msg.get("isp_email","")
isp.isp_phone = req_msg.get("isp_phone","")
isp.isp_idcard = req_msg.get("isp_idcard","")
isp.user_total = 0
isp.status = 0
self.db.add(isp)
self.db.commit()
self.render_result(code=0, msg="success", data=dict(isp_code=isp.isp_code))
@permit.route(r"/api/isp/update")
class IspUpdateHandler(ApiHandler):
def get(self):
self.post()
def post(self):
try:
req_msg = self.parse_form_request()
if 'isp_code' not in req_msg:
raise ValueError("isp_code required")
except Exception as err:
self.render_result(code=1, msg=safeunicode(err.message))
return
isp = self.db.query(models.TrwIsp).filter_by(isp_code=req_msg['isp_code']).first()
if not isp:
self.render_result(code=1, msg="isp not exists")
return
attrs = ['isp_name','isp_desc','isp_email','isp_phone','isp_idcard']
for attr in attrs:
if attr in req_msg:
setattr(isp, attr, req_msg[attr])
if 'status' in req_msg and req_msg['status'] in ('0','1'):
isp.status = int(req_msg['status'])
self.db.commit()
self.render_result(code=0, msg="success")
@permit.route(r"/api/isp/subscriber")
class IspServiceSubHandler(ApiHandler):
def get(self):
self.post()
def post(self):
try:
req_msg = self.parse_form_request()
except Exception as err:
self.render_result(code=1, msg=safeunicode(err.message))
return
ispserv = models.TrwIspService()
ispserv.isp_code = req_msg.get("isp_code")
ispserv.service_type = req_msg.get("service_type")
ispserv.sub_time = get_currtime()
self.db.add(ispserv)
self.db.commit()
self.render_result(code=0, msg="success")
@permit.route(r"/api/isp/unsubscriber")
class IspServiceUnsubHandler(ApiHandler):
def get(self):
self.post()
def post(self):
try:
req_msg = self.parse_form_request()
if not all([req_msg.get("isp_code"),req_msg.get("service_type")]):
raise ValueError("isp_code, service_type required")
except Exception as err:
self.render_result(code=1, msg=safeunicode(err.message))
return
self.db.query(models.TrwIspService).filter_by(
isp_code = req_msg.get("isp_code"),
service_type = req_msg.get("service_type")).delete()
self.db.commit()
self.render_result(code=0, msg="success")
| talkincode/toughwlan | toughwlan/manage/api/api_isp.py | Python | agpl-3.0 | 4,654 |
import pexpect
import unittest
import sys
import os.path
DATADIR = os.path.join(os.path.dirname(__file__), 'data')
KEYA_VDO = os.path.join(DATADIR, 'samplea.vdo')
KEYB_VDO = os.path.join(DATADIR, 'sampleb.vdo')
class TestSendEvents(object):
def setUp(self):
cmd = 'vncev -rfbport 5933 -rfbwait 1000'
self.server = pexpect.spawn(cmd, logfile=sys.stdout, timeout=2)
def tearDown(self):
self.server.terminate(force=True)
def assertKeyDown(self, key):
down = '^.*down:\s+\(%s\)\r' % hex(key)
self.server.expect(down)
def assertKeyUp(self, key):
up = '^.*up:\s+\(%s\)\r' % hex(key)
self.server.expect(up)
def assertMouse(self, x, y, buttonmask):
output = '^.*Ptr: mouse button mask %s at %d,%d' % (hex(buttonmask), x, y)
self.server.expect(output)
def assertDisconnect(self):
disco = 'Client 127.0.0.1 gone'
self.server.expect(disco)
def run_vncdo(self, commands):
cmd = 'vncdo -v -s :33 ' + commands
vnc = pexpect.spawn(cmd, logfile=sys.stdout, timeout=5)
retval = vnc.wait()
assert retval == 0, retval
def test_key_alpha(self):
self.run_vncdo('key z')
self.assertKeyDown(ord('z'))
self.assertKeyUp(ord('z'))
self.assertDisconnect()
def test_key_ctrl_a(self):
self.run_vncdo('key ctrl-a')
self.assertKeyDown(int(0xffe3))
self.assertKeyDown(ord('a'))
self.assertKeyUp(int(0xffe3))
self.assertKeyUp(ord('a'))
self.assertDisconnect()
def test_type(self):
string = 'abcdefghij'
self.run_vncdo('type %s' % string)
for key in string:
self.assertKeyDown(ord(key))
self.assertKeyUp(ord(key))
self.assertDisconnect()
def test_mouse_move(self):
# vncev only prints click events, but will include the position
self.run_vncdo('move 10 20 click 1')
self.assertMouse(10, 20, 0x1)
self.assertDisconnect()
def test_mouse_click_button_two(self):
self.run_vncdo('click 2')
self.assertMouse(0, 0, 0x2)
self.assertDisconnect()
def test_read_files(self):
self.run_vncdo('key x %s key y %s' % (KEYA_VDO, KEYB_VDO))
for key in 'xayb':
self.assertKeyDown(ord(key))
self.assertKeyUp(ord(key))
| dtreiter/vncdotool | tests/functional/test_send_events.py | Python | mit | 2,385 |
# little grammar for test
from pyrser import grammar
class CSV(grammar.Grammar):
entry = "csv"
grammar = """
csv = [ [@ignore("null") line : l #add_line(_, l)]+ eof ]
line =
[
item : c #add_col(_, c)
[';' item : c #add_col(_, c)]*
eol
]
item = [ [id | num] : i #add_item(_, i) ]
"""
class CSV2(grammar.Grammar, CSV):
entry = "csv2"
# copy the result of CSV.csv as result of csv2
grammar = """
csv2 = [ CSV.csv:>_ ]
item = [ [CSV.item]?:>_ ]
"""
| LionelAuroux/pyrser | tests/grammar/csv.py | Python | gpl-3.0 | 572 |
#$ -t 1-240
import os
import glob
SGE_TASK_ID = int(os.environ['SGE_TASK_ID'])
os.system('date')
os.system('hostname')
InputFiles = glob.glob('dyna*.mat')
print(InputFiles[SGE_TASK_ID])
os.system('matlab -nodesktop -nosplash -r "run_makeLoadsTemps(\'%s\')"' % InputFiles[SGE_TASK_ID])
| Guokr1991/cervix | field/9L4/makeLoads.sge.py | Python | mit | 289 |
#!/usr/bin/python
import sqlite3
import common
class DbCreator:
@staticmethod
def createEmpty(filename=None):
if None == filename:
filename=common.Config.getDbFilename()
print "create db in file :"+filename
conn = sqlite3.connect(filename)
print conn
c = conn.cursor()
c.execute('''DROP TABLE IF EXISTS sensorinos''')
c.execute('''CREATE TABLE sensorinos
(address TEXT PRIMARY KEY, name TEXT, description TEXT, owner TEXT, location TEXT)''')
c.execute('''DROP TABLE IF EXISTS services''')
c.execute('''CREATE TABLE services
(serviceId INTEGER PRIMARY KEY, name TEXT, stype TEXT, dataType TEXT, saddress TEXT, state TEXT)''')
c.execute('''DROP TABLE IF EXISTS dataServicesLog''')
c.execute('''CREATE TABLE dataServicesLog
(saddress TEXT, serviceId INTEGER, value TEXT, timestamp TEXT)''')
# Save (commit) the changes
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
if __name__ == '__main__':
DbCreator.createEmpty()
| elektroid/SensorinoServer | server/database.py | Python | gpl-2.0 | 1,270 |
"""Support to turn on lights based on the states."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_PROFILE,
ATTR_TRANSITION,
DOMAIN as DOMAIN_LIGHT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
EVENT_HOMEASSISTANT_START,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_HOME,
STATE_NOT_HOME,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
async_track_point_in_utc_time,
async_track_state_change,
)
from homeassistant.helpers.sun import get_astral_event_next, is_up
import homeassistant.util.dt as dt_util
DOMAIN = "device_sun_light_trigger"
CONF_DEVICE_GROUP = "device_group"
CONF_DISABLE_TURN_OFF = "disable_turn_off"
CONF_LIGHT_GROUP = "light_group"
CONF_LIGHT_PROFILE = "light_profile"
DEFAULT_DISABLE_TURN_OFF = False
DEFAULT_LIGHT_PROFILE = "relax"
LIGHT_TRANSITION_TIME = timedelta(minutes=15)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_DEVICE_GROUP): cv.entity_id,
vol.Optional(
CONF_DISABLE_TURN_OFF, default=DEFAULT_DISABLE_TURN_OFF
): cv.boolean,
vol.Optional(CONF_LIGHT_GROUP): cv.string,
vol.Optional(
CONF_LIGHT_PROFILE, default=DEFAULT_LIGHT_PROFILE
): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
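# Example configuration.yaml entry (entity ids below are illustrative):
#
# device_sun_light_trigger:
#   device_group: group.family
#   light_group: group.living_room_lights
#   light_profile: relax
#   disable_turn_off: false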
async def async_setup(hass, config):
"""Set up the triggers to control lights based on device presence."""
conf = config[DOMAIN]
disable_turn_off = conf[CONF_DISABLE_TURN_OFF]
light_group = conf.get(CONF_LIGHT_GROUP)
light_profile = conf[CONF_LIGHT_PROFILE]
device_group = conf.get(CONF_DEVICE_GROUP)
async def activate_on_start(_):
"""Activate automation."""
await activate_automation(
hass, device_group, light_group, light_profile, disable_turn_off
)
if hass.is_running:
await activate_on_start(None)
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, activate_on_start)
return True
async def activate_automation(
hass, device_group, light_group, light_profile, disable_turn_off
):
"""Activate the automation."""
logger = logging.getLogger(__name__)
device_tracker = hass.components.device_tracker
group = hass.components.group
light = hass.components.light
person = hass.components.person
if device_group is None:
device_entity_ids = hass.states.async_entity_ids(device_tracker.DOMAIN)
else:
device_entity_ids = group.get_entity_ids(device_group, device_tracker.DOMAIN)
device_entity_ids.extend(group.get_entity_ids(device_group, person.DOMAIN))
if not device_entity_ids:
logger.error("No devices found to track")
return
# Get the light IDs from the specified group
if light_group is None:
light_ids = hass.states.async_entity_ids(light.DOMAIN)
else:
light_ids = group.get_entity_ids(light_group, light.DOMAIN)
if not light_ids:
logger.error("No lights found to turn on")
return
@callback
def anyone_home():
"""Test if anyone is home."""
return any(device_tracker.is_on(dt_id) for dt_id in device_entity_ids)
@callback
def any_light_on():
"""Test if any light on."""
return any(light.is_on(light_id) for light_id in light_ids)
def calc_time_for_light_when_sunset():
"""Calculate the time when to start fading lights in when sun sets.
Returns None if no next_setting data available.
Async friendly.
"""
next_setting = get_astral_event_next(hass, SUN_EVENT_SUNSET)
if not next_setting:
return None
return next_setting - LIGHT_TRANSITION_TIME * len(light_ids)
async def async_turn_on_before_sunset(light_id):
"""Turn on lights."""
if not anyone_home() or light.is_on(light_id):
return
await hass.services.async_call(
DOMAIN_LIGHT,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: light_id,
ATTR_TRANSITION: LIGHT_TRANSITION_TIME.seconds,
ATTR_PROFILE: light_profile,
},
)
@callback
def async_turn_on_factory(light_id):
"""Generate turn on callbacks as factory."""
async def async_turn_on_light(now):
"""Turn on specific light."""
await async_turn_on_before_sunset(light_id)
return async_turn_on_light
# Track every time sun rises so we can schedule a time-based
# pre-sun set event
@callback
def schedule_light_turn_on(now):
"""Turn on all the lights at the moment sun sets.
We will schedule to have each light start after one another
and slowly transition in.
"""
start_point = calc_time_for_light_when_sunset()
if not start_point:
return
for index, light_id in enumerate(light_ids):
async_track_point_in_utc_time(
hass,
async_turn_on_factory(light_id),
start_point + index * LIGHT_TRANSITION_TIME,
)
async_track_point_in_utc_time(
hass, schedule_light_turn_on, get_astral_event_next(hass, SUN_EVENT_SUNRISE)
)
# If the sun is already above horizon schedule the time-based pre-sun set
# event.
if is_up(hass):
schedule_light_turn_on(None)
@callback
def check_light_on_dev_state_change(entity, old_state, new_state):
"""Handle tracked device state changes."""
lights_are_on = any_light_on()
light_needed = not (lights_are_on or is_up(hass))
# These variables are needed for the elif check
now = dt_util.utcnow()
start_point = calc_time_for_light_when_sunset()
# Do we need lights?
if light_needed:
logger.info("Home coming event for %s. Turning lights on", entity)
hass.async_create_task(
hass.services.async_call(
DOMAIN_LIGHT,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: light_ids, ATTR_PROFILE: light_profile},
)
)
        # Are we in the time span where we would turn on the lights
        # if someone were home?
        # Check this by seeing if the current time is later than the point
        # in time when we would start putting the lights on.
elif start_point and start_point < now < get_astral_event_next(
hass, SUN_EVENT_SUNSET
):
# Check for every light if it would be on if someone was home
# when the fading in started and turn it on if so
for index, light_id in enumerate(light_ids):
if now > start_point + index * LIGHT_TRANSITION_TIME:
hass.async_create_task(
hass.services.async_call(
DOMAIN_LIGHT, SERVICE_TURN_ON, {ATTR_ENTITY_ID: light_id}
)
)
else:
                    # If this light is not on yet, neither would any of the
                    # following ones be, so stop here.
break
async_track_state_change(
hass,
device_entity_ids,
check_light_on_dev_state_change,
STATE_NOT_HOME,
STATE_HOME,
)
if disable_turn_off:
return
@callback
def turn_off_lights_when_all_leave(entity, old_state, new_state):
"""Handle device group state change."""
# Make sure there is not someone home
if anyone_home():
return
# Check if any light is on
if not any_light_on():
return
logger.info("Everyone has left but there are lights on. Turning them off")
hass.async_create_task(
hass.services.async_call(
DOMAIN_LIGHT, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: light_ids}
)
)
async_track_state_change(
hass,
device_entity_ids,
turn_off_lights_when_all_leave,
STATE_HOME,
STATE_NOT_HOME,
)
return
| nkgilley/home-assistant | homeassistant/components/device_sun_light_trigger/__init__.py | Python | apache-2.0 | 8,416 |
#!/usr/bin/env python
"""
Author: Derek Hohls
Date: June 2016
Purpose:
Generate a file (e.g. PDF or Excel) containing details of games (or game-
like objects). Basic layout is a table-per-game or a row-per-game.
Notes:
Huge thanks to authors and developers of the following Python Libraries:
* boardgamegeek
* reportlab
* xlwt
"""
# lib
from collections import OrderedDict
import json
import os
import sys
import time
# other
import xlwt
# reportlab
from reportlab.pdfgen import canvas
from reportlab.lib.enums import TA_RIGHT, TA_LEFT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.pagesizes import letter, A4, landscape
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.lib.units import inch, cm
from reportlab.lib.utils import ImageReader
from reportlab.lib.colors import black, white, slategray, slategrey, \
lightgrey, lightslategray, lightslategrey, \
red
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase.pdfmetrics import registerFontFamily
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, \
TableStyle, Image
FONTS = '.local/share/fonts' # path for Ubuntu Linux
HOME = os.path.expanduser("~")
BASE = os.path.join(HOME, FONTS)
class GameReportBuilder(object):
def __init__(self, *args, **kwargs):
__version_info__ = ('1', '0', '0')
self.__version__ = __version_info__
self.games = kwargs.get('games', []) # list of 'game' objects
self.user = kwargs.get('user', '')
self.time = kwargs.get('time', 'UK')
self.filename = kwargs.get('filename')
self.progress = kwargs.get('progress', False)
self.family_names = kwargs.get('familys', [])
self.font_names = kwargs.get('fonts', [])
self.page_footer = kwargs.get(
'page_footer', '')
self.page_header = kwargs.get(
'page_header', 'Board Game Geek Collection Printer (v0.1)')
header = kwargs.get('header')
body = kwargs.get('left')
margin = kwargs.get('margin', 72)
page_size = kwargs.get('page', 'A4')
if page_size == 'A4':
size = A4
elif page_size == 'letter':
            size = letter  # reportlab exports the lowercase name 'letter'
        else:
            raise NotImplementedError('Page size "%s" is not available' % page_size)
self.set_doc(filename=self.filename, margin=margin, page=size)
# fonts & styles
for fname in self.family_names:
self.ttf_register(fname, family=True)
for fname in self.font_names:
self.ttf_register(fname, family=False)
self.styles = getSampleStyleSheet()
self.set_styles(body, header) # style sheets pre-made
def ttf_register(self, name, family=False, base_dir=BASE):
"""
Register a font or a font family.
Example:
# http://www.1001freefonts.com/alegreya_sc.font
pdfmetrics.registerFont(TTFont('AlegreyaSCR',
os.path.join(base_dir, 'AlegreyaSC-Regular.ttf')))
pdfmetrics.registerFont(TTFont('AlegreyaSCI',
os.path.join(base_dir, 'AlegreyaSC-Italic.ttf')))
pdfmetrics.registerFont(TTFont('AlegreyaSCBI',
os.path.join(base_dir, 'AlegreyaSC-BoldItalic.ttf')))
pdfmetrics.registerFont(TTFont('AlegreyaSCB',
os.path.join(base_dir, 'AlegreyaSC-Bold.ttf')))
registerFontFamily(
'AlegreyaSC', normal='AlegreyaSCR', bold='AlegreyaSCB',
italic='AlegreyaSCI', boldItalic='AlegreyaSCBI')
Note:
Acrobat PDF has 14 built-in fonts, supported by reportlab:
Courier, Helvetica, Courier-Bold, Helvetica-Bold, Courier-Oblique,
Helvetica-Oblique, Courier-BoldOblique, Helvetica-BoldOblique,
Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
ZapfDingbats
"""
if not family:
pdfmetrics.registerFont(TTFont(name,
os.path.join(base_dir, '%s.ttf' % name)))
else:
pdfmetrics.registerFont(TTFont('%sR' % name,
os.path.join(base_dir, '%s-Regular.ttf' % name)))
pdfmetrics.registerFont(TTFont('%sI' % name,
os.path.join(base_dir, '%s-Italic.ttf' % name)))
pdfmetrics.registerFont(TTFont('%sBI' % name,
os.path.join(base_dir, '%s-BoldItalic.ttf' % name)))
pdfmetrics.registerFont(TTFont('%sB' % name,
os.path.join(base_dir, '%s-Bold.ttf' % name)))
            registerFontFamily(
                name, normal='%sR' % name, bold='%sB' % name,
                italic='%sI' % name, boldItalic='%sBI' % name)
def set_doc(self, filename, margin=72, page=A4):
_filename = filename or 'games.pdf'
self.doc = SimpleDocTemplate(
_filename,
rightMargin=margin,
leftMargin=margin,
topMargin=margin,
bottomMargin=margin,
pagesize=page)
def set_styles(self, body, header):
"""
Make styles available to printing routines.
"""
body = body or 'Times'
header = header or 'Helvetica'
page_header = 'Helvetica'
page_footer = 'Helvetica'
try:
# body
self.styles.add(ParagraphStyle(
name='right',
fontName=body,
alignment=TA_RIGHT))
self.styles.add(ParagraphStyle(
name='left',
fontName=body,
alignment=TA_LEFT))
self.styles.add(ParagraphStyle(
name='centre',
fontName=body,
alignment=TA_CENTER))
# header
self.styles.add(ParagraphStyle(
name='CentreHeader',
fontName=header,
fontSize=14,
spaceBefore=3,
spaceAfter=4,
alignment=TA_CENTER))
self.styles.add(ParagraphStyle(
name='info',
fontName=header,
alignment=TA_LEFT)),
# page_...
self.styles.add(ParagraphStyle(
name='page_header',
fontName=page_header,
fontSize=8,
spaceAfter=6,
alignment=TA_LEFT)),
self.styles.add(ParagraphStyle(
name='page_footer',
fontName=page_footer,
fontSize=9,
alignment=TA_RIGHT))
except ValueError:
print "Unable to use or access the custom fonts!"
sys.exit(1)
def get_image(self, game, path, width=1*cm, height=None):
"""
Create an image from a path - either on on disc or from a web URL.
"""
        if self.progress and game is not None:
            print "Retrieving image for game: %7d" % int(game.id)
img = ImageReader(path)
iw, ih = img.getSize()
aspect = ih / float(iw)
if height:
return Image(path, width=(height * aspect), height=height)
else:
return Image(path, width=width, height=(width * aspect))
def set_header_footer(self, canvas, doc):
"""
Set header and footer on each page; default is NO header and footer with
a page no.
"""
# Save canvas
canvas.saveState()
page_num = canvas.getPageNumber()
# Header
if self.page_header:
header = Paragraph(self.page_header, self.styles['page_header'])
w, h = header.wrap(doc.width, doc.topMargin)
header.drawOn(canvas, doc.leftMargin, doc.height + doc.topMargin - h)
# Footer
_footer = self.page_footer or "Pg. %s" % page_num
footer = Paragraph(_footer, self.styles['page_footer'])
w, h = footer.wrap(doc.width, doc.bottomMargin)
footer.drawOn(canvas, doc.leftMargin, h)
# Release the canvas
canvas.restoreState()
def create_json(self):
"""
Create a JSON file containing games' details; entries keyed on game ID
"""
game_list = {}
for number, game in enumerate(self.games):
game_list[int(game.id)] = game.__dict__
dump = json.dumps(game_list, indent=2, default=str)
_file = open(self.filename, 'w')
print >> _file, dump
_file.close()
def create_xls(self):
"""
Create an XLS spreadsheet displaying games' details; one game per row
"""
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("Summary")
sheet.col(0).width = 256 * 60
bold_style = xlwt.easyxf('font: bold 1')
_items = (
('Name', 'name'),
('ID', 'id'),
('Weight', 'averageweight'),
('% Weight', 'percentageweight'),
('Year', 'yearpublished'),
('Age', 'age'),
('Time', 'playingtime'),
('Min.', 'minplayers'),
('Max', 'maxplayers'),
('Mechanics', 'mechanics'),
('Categories', 'categories'),
)
items = OrderedDict(_items)
for col, head in enumerate(items.keys()):
sheet.write(0, col, head, bold_style)
for number, game in enumerate(self.games):
if self.progress:
print "Creating the row for game: %7d" % int(game.id)
for col, head in enumerate(items.keys()):
sheet.write(number + 1, col, getattr(game, items[head]))
workbook.save(self.filename)
def create_table_summary(self, game, num):
"""
Create a reportlab table displaying summarised game information.
Args:
game: object
a BGGGame object (or similar) whose properties correspond to
game attributes e.g. name, description
"""
if self.progress:
print "Generating summary table for game: %7d" % int(game.id)
print "Generating a summary row for game: %7d" % int(game.id)
div = self.doc.width / 7.0
table_data = [
[
Paragraph('<b>%s</b>' % game.name, self.styles['left']),
Paragraph('<b>%s (%s)</b>' %
(game.averageweight, game.percentageweight),
self.styles['left']),
Paragraph('<b>%s</b>' % game.yearpublished, self.styles['left']),
Paragraph('<b>%s</b>' % game.age, self.styles['left']),
Paragraph('<b>%s</b>' % game.playingtime, self.styles['left']),
Paragraph('<b>%s</b>' % game.players, self.styles['left']),
]
]
if num == 0:
table_data.insert(0,
[
Paragraph('<b>Name</b>', self.styles['info']),
Paragraph('<b>Weight (%)</b>', self.styles['left']),
Paragraph('<b>Year</b>', self.styles['left']),
Paragraph('<b>Age</b>', self.styles['left']),
Paragraph('<b>Time</b>', self.styles['left']),
Paragraph('<b>Players</b>', self.styles['left']),
]
)
# create the table
game_table = Table(table_data,
colWidths=[div*2, div, div, div, div, div])
game_table.setStyle(
TableStyle([
('BOX', (0, 0), (-1, -1), 0.5, black),
('VALIGN',(0,0), (-1,-1), 'TOP'),
]),
)
return game_table
def create_table_compact(self, game):
"""
Create a compact reportlab table displaying game information.
Args:
game: object
a BGGGame object (or similar) whose properties correspond to
game attributes e.g. name, description
"""
if self.progress:
print "Generating table for game: %7d" % (int(game.id))
div = self.doc.width / 7.0
HT = 0.6 * cm
# note that 'n' in div * n MUST correspond to number of cols spanned
if 'geekdo-images' in game.image:
_image = game.image.replace('.jpg', '_sq.jpg').replace('.png', '_sq.png')
else:
_image = game.image
game_image = self.get_image(game, path=_image, height=HT*3 - 8)
table_data = [
[
game_image,
Paragraph('<b>%s</b>' % game.name, self.styles['info']),
'', '',
Paragraph('<b>%s</b>' % game.age, self.styles['centre']),
Paragraph('<b>%s</b> min' % game.playingtime, self.styles['centre']),
Paragraph('<b>%s</b> players' % game.players, self.styles['right'])
],
[
'', Paragraph('%s' % game.mechanics, self.styles['left']),
'', '', '', '', ''
],
[
'', Paragraph('%s' % game.categories, self.styles['left']),
'', '', '', '', ''
]
]
# create the table
game_table = Table(table_data,
colWidths=[div, div, div, div, div, div, div],
rowHeights=[HT] * len(table_data))
game_table.setStyle(
TableStyle([
('BOX', (0, 0), (-1, -1), 0.5, black),
('VALIGN',(0,0), (-1,-1), 'TOP'),
('SPAN',(0,0),(0,2)),
('SPAN',(1,0),(3,0)),
('SPAN',(1,1),(6,1)),
('SPAN',(1,2),(6,2)),
]),
)
return game_table
def create_table(self, game):
"""
Create a reportlab table displaying game information.
Args:
game: object
a BGGGame object (or similar) whose properties correspond to
game attributes e.g. name, description
"""
if self.progress:
print "Generating table for game: %7d" % int(game.id)
div = self.doc.width / 8.0
# note that 'n' in div * n MUST correspond to number of cols spanned
if 'geekdo-images' in game.image:
_image = game.image.replace('.jpg', '_md.jpg').replace('.png', '_md.png')
else:
_image = game.image
game_image = self.get_image(game, path=_image, width=div * 3 - 9)
table_data = [
[
Paragraph('<b>Ages</b>: %s' % game.age,
self.styles['info']),
'',
Paragraph('<b>Published</b>: %s' % game.yearpublished,
self.styles['info']),
'',
Paragraph('<b>Time</b>: %s min' % game.playingtime, self.styles['info']),
'',
Paragraph('<b>Players</b>: %s' % game.players, self.styles['info']),
''
],
[
Paragraph('<b>Categories</b>: %s' % game.categories, self.styles['info']),
'', '', '', '', '', '', ''
],
[
Paragraph('<b>Mechanics</b>: %s' % game.mechanics, self.styles['info']),
'', '', '', '', '', '', ''
],
[
Paragraph(game.description_html, self.styles['left']),
'', '', '', '',
game_image,
'', ''
]
]
# create the table
game_table = Table(table_data,
colWidths=[div, div, div, div, div, div, div, div])
game_table.setStyle(
TableStyle([
('BOX', (0, 0), (-1, -1), 0.5, black),
('VALIGN',(0,0), (-1,-1), 'TOP'),
('SPAN',(0,0),(1,0)),
('SPAN',(2,0),(3,0)),
('SPAN',(4,0),(5,0)),
('SPAN',(6,0),(7,0)),
('SPAN',(0,1),(7,1)),
('SPAN',(0,2),(7,2)),
('SPAN',(0,3),(4,3)),
('SPAN',(5,3),(7,3)),
]),
)
return game_table
def create_qr(self, ID, width=2*cm, prefix=None, suffix=None):
"""
Generate QR image for a (default) BGG game
"""
server = 'https://api.qrserver.com/v1/create-qr-code/?size=150x150&data='
_prefix = prefix or 'https://boardgamegeek.com/boardgame/'
_suffix = suffix or ''
        url = '%s%s%s%s' % (server, _prefix, ID, _suffix)
        img = self.get_image(None, url, width=width)
return img
def save_games(self, style='full'):
"""
Primary routine to drive creation of a reportlab PDF.
Elements such as paragraphs & tables are collated in a list; and then
the document is created.
Headers and Footer are set via the doc.build().
"""
elements = []
if style in ['summary', 'compact']:
elements.append(Spacer(1, 0.5*cm))
# All done!
if style in ['full', 'compact', 'summary']:
# Create table per game
for number, game in enumerate(self.games):
if style == 'full':
gtable = self.create_table(game)
header = Paragraph('<b>%s</b>' % game.name,
self.styles['CentreHeader'])
header.keepWithNext = True
elements.append(header)
elements.append(gtable)
elif style == 'compact':
gtable = self.create_table_compact(game)
elements.append(gtable)
elif style == 'summary':
gtable = self.create_table_summary(game, number)
elements.append(gtable)
# After tables
elements.append(Spacer(1, 0.5*cm))
if self.time == 'US':
_date = time.strftime("%b %d, %Y %H:%M")
else:
_date = time.strftime("%Y-%m-%d %H:%M")
p2 = Paragraph('Printed at %s' % _date, self.styles['right'])
elements.append(p2)
if self.progress:
print "Generating PDF Document... ... ..."
self.doc.build(
elements,
onFirstPage=self.set_header_footer,
onLaterPages=self.set_header_footer)
elif style == 'excel':
print "Generating XLS Spreadsheet ... ..."
self.create_xls()
elif style == 'json':
print "Generating a JSON File ... ... ..."
self.create_json()
else:
print 'The style "%s" does not exist!' % style
sys.exit(1) | gamesbook/gamereporter | report_builder.py | Python | mit | 18,929 |
#!/usr/bin/env python3
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import asyncio
import shlex
from cerbero.config import Platform
from cerbero.utils import shell, run_until_complete, messages as m
class Strip(object):
'''Wrapper for the strip tool'''
def __init__(self, config, excludes=None, keep_symbols=None):
self.config = config
self.excludes = excludes or []
self.keep_symbols = keep_symbols or []
self.strip_cmd = []
if 'STRIP' in config.env:
self.strip_cmd = shlex.split(config.env['STRIP'])
async def _async_strip_file(self, path):
if not self.strip_cmd:
m.warning('Strip command is not defined')
return
for f in self.excludes:
if f in path:
return
if self.config.target_platform == Platform.DARWIN:
cmd = self.strip_cmd + ['-x', path]
else:
cmd = self.strip_cmd[:]
for symbol in self.keep_symbols:
cmd += ['-K', symbol]
cmd += ['--strip-unneeded', path]
try:
await shell.async_call(cmd)
except Exception as e:
m.warning(e)
def strip_file(self, path):
run_until_complete(self._async_strip_file(path))
def strip_dir(self, dir_path):
if not self.strip_cmd:
m.warning('Strip command is not defined')
return
tasks = []
for dirpath, dirnames, filenames in os.walk(dir_path):
for f in filenames:
tasks.append(self._async_strip_file(os.path.join(dirpath, f)))
run_until_complete(tasks)
| nirbheek/cerbero | cerbero/tools/strip.py | Python | lgpl-2.1 | 2,480 |
import numpy as np
import pylab as pl
from math import sqrt
import os
from scipy.ndimage import gaussian_filter
def save_data(filename, data):
print("Saving data")
f = open(filename, 'w')
np.save(f, data)
f.close()
def save_dict(filename, data):
import pickle
print("Saving data")
f = open(filename, 'w')
pickle.dump(data, f)
f.close()
if __name__ == '__main__' :
import optparse
parser = optparse.OptionParser()
parser.add_option("-a", "--start",
dest="a",
help="Starting angle",
default=500,
type='int')
parser.add_option("-b", "--end",
dest="b",
help="Final angle",
default=500,
type='int')
parser.add_option("-c", "--step",
dest="c",
help="Step size",
default=500,
type='int')
(options, args) = parser.parse_args()
start = options.a - 1
stop = options.b
step = options.c
radii_filename = args[0]
index = int(args[1])
anal_path = args[2]
contact_filename = args[3]
# get the radius
radii = []
for i in range(start, stop, step):
radii.append(np.load(radii_filename % i))
radii_np = np.zeros((stop,180))
for i in range(stop/step):
radii_np[i*step:i*step+step,:] = radii[i]
# get the contact points
contact = []
for i in range(start, stop, step):
contact.append(np.load(contact_filename % i))
contact_np = np.zeros((stop,180))
for i in range(stop/step):
contact_np[i*step:i*step+step,:] = contact[i]
radius = np.mean(radii_np)
# # Dictionary to store the angles and outlier values
# outliers = {}
#
# # Threshold the image
# area1 = radii_np == 0
# area1 = area1 * 1
#
# # Store the angles of the anomalous values
# for i in range(start,stop):
# for j in range(0,180):
# if radii_np[i, j] == 0:
# angl = (i,j)
# outliers[angl] = 0
#
# # save image
# output_filename = anal_path + "/outliers%02i.dat" % index
# output_angles = anal_path + "/radii%02i.npy" % index
# print("Saving data %s" % output_filename)
# print("Saving data %s" % output_angles)
# save_data(output_angles, radii_np)
# save_dict(output_filename, outliers)
# just contact pts
# delete_list = []
# for i in range(start,stop):
# for j in range(0,180):
# if contact_np[i, j] == 0:
# delete_list.append((i, j))
#
# Plot
pl.subplot(2, 1, 1)
pl.imshow(radii_np.T)
pl.title(r'Radii of real sphere as a function of 2 spherical angles $\theta$ and $\phi$',\
fontdict={'fontsize': 16,'verticalalignment': 'bottom','horizontalalignment': 'center'})
pl.xlabel(r'$\theta$', fontdict={'fontsize': 14,'verticalalignment': 'top','horizontalalignment': 'center'})
pl.ylabel(r'$\phi$', fontdict={'fontsize': 14,'verticalalignment': 'bottom','horizontalalignment': 'right'}, rotation=0)
pl.colorbar(shrink=0.8)
pl.subplot(2, 1, 2)
pl.imshow(contact_np.T)
pl.xlabel(r'$\theta$', fontdict={'fontsize': 14,'verticalalignment': 'top','horizontalalignment': 'center'})
pl.ylabel(r'$\phi$', fontdict={'fontsize': 14,'verticalalignment': 'bottom','horizontalalignment': 'right'}, rotation=0)
pl.colorbar(shrink=0.8)
pl.savefig(anal_path + "/radii%02i_%f.png" % (index, radius))
pl.show()
| DiamondLightSource/auto_tomo_calibration-experimental | old_code_scripts/plot_radii.py | Python | apache-2.0 | 3,693 |
# The os module
import os
# Constants
# os.curdir    The current directory
print(os.curdir)
# os.pardir    The parent directory
print(os.pardir)
# os.sep       Separator between path components
print(os.sep)
# os.altsep    Alternative separator between path components
print(os.altsep)
# os.pathsep   Separator between paths in a path list
print(os.pathsep)
# os.defpath   Default search path
print(os.defpath)
# os.linesep   Line terminator
print(os.linesep)
# get the PATH environment variable
PATH = os.environ['PATH']
print(PATH)
# this group of functions deals with files and directories
# getcwd() The current working directory.
print(os.getcwd())
# access(path,flags) Checks whether the file or directory named path is accessible. The
# requested access mode is given by flags, a combination (bitwise OR) of the
# flags os.F_OK (the file exists), os.R_OK (the file can be read),
# os.W_OK (the file can be written) and
# os.X_OK (the file can be executed, the directory can be listed).
path = os.getcwd()
flags = os.F_OK
print(os.access(path,flags))
# chdir(path) Makes path the current working directory.
# listdir(dir) Returns the list of files in the directory dir. The special
# entries "." and ".." are not included in the list.
print(os.listdir(path))
# mkdir(path[,mode]) Creates the directory path. By default mode is 0777, that is:
# S_IRWXU|S_IRWXG|S_IRWXO when using the constants of the stat module.
# makedirs(path[,mode]) Like mkdir(), but also creates all intermediate directories if they do
# not exist. Raises an exception when the last directory already exists.
os.mkdir('test')  # create the directory that the chmod()/rmdir() calls below operate on
print(os.listdir(path))
# chmod(path, mode) Sets the access mode of path to mode. The mode can be built by
# combining flags. Note that chmod() does not add to the current mode
# but replaces it entirely.
path_test = 'test'
mode = 0o777
os.chmod(path_test, mode)
# rmdir(path) Removes the empty directory path.
os.rmdir(path_test)
print(os.listdir(path))
# remove(path),unlink(path) Removes the file path. To remove directories use rmdir() and
# removedirs().
# removedirs(path) Removes path up to the first non-empty directory. If the innermost
# subdirectory of the given path is not empty,
# an OSError exception is raised.
# rename(src, dst) Renames the file or directory src to dst.
# renames(src, dst) Like rename(), but also creates any directories needed for the path dst
# and removes the empty directories of the path src.
# utime(path,times) Sets the last-modification (mtime) and last-access (atime) times of
# the file. If times is None, the current time is used. Otherwise
# times is treated as a two-element tuple (atime, mtime). To obtain the
# atime and mtime of a file, stat() can be used together with the
# constants of the stat module.
# stat(path) Returns information about path as a tuple of at least ten
# elements. The elements of the tuple can be accessed using constants
# from the stat module, for example stat.ST_MTIME
# (time of the last modification of the file).
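# A short illustrative example for stat() and utime(): read the current access
# and modification times of this directory and write them back unchanged.
import stat
info = os.stat(path)
print(info[stat.ST_ATIME], info[stat.ST_MTIME])
os.utime(path, (info[stat.ST_ATIME], info[stat.ST_MTIME]))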
| janusnic/21v-python | unit_06/ospath.py | Python | mit | 4,620 |
# Copyright 2016 Neverware Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
from os import path
from setuptools import setup
SCRIPT_PATH = path.abspath(path.dirname(__file__))
def read_readme():
"""Get the contents of the README.rst file."""
readme_path = path.join(SCRIPT_PATH, 'README.rst')
with open(readme_path) as rfile:
return rfile.read()
setup(
name='bios_pnp',
version='1.1.1',
description='Very simple module that enumerates Legacy Plug and Play devices',
long_description=read_readme(),
url='https://github.com/nicholasbishop/bios_pnp',
author='Nicholas Bishop',
author_email='[email protected]',
license='Apache 2.0',
packages=['bios_pnp'],
install_requires=['attrs>=16.0.0'],
)
| neverware/bios_pnp | setup.py | Python | apache-2.0 | 1,303 |
from unittest import mock
import pytest
import ispyb.model.pdb
def test_pdb_values_are_immutable():
P = ispyb.model.pdb.PDB()
with pytest.raises(AttributeError):
P.name = "test"
with pytest.raises(AttributeError):
P.rawfile = "test"
with pytest.raises(AttributeError):
P.code = "test"
def test_pdb_values_can_be_read_back():
P = ispyb.model.pdb.PDB(
name=mock.sentinel.name, rawfile=mock.sentinel.rawfile, code=mock.sentinel.code
)
assert P.name == mock.sentinel.name
assert P.rawfile == mock.sentinel.rawfile
assert P.code == mock.sentinel.code
P = ispyb.model.pdb.PDB()
assert P.name is None
assert P.rawfile is None
assert P.code is None
P = ispyb.model.pdb.PDB(name="", rawfile="", code="")
assert P.name == ""
assert P.rawfile is None
assert P.code is None
def test_pdb_object_representation():
P = ispyb.model.pdb.PDB(name="somename", rawfile="x" * 100, code="somecode")
assert repr(P) == "<PDB somename>"
assert "somename" in str(P)
assert "100 bytes" in str(P)
assert "xxxxxxxxx" not in str(P)
assert "somecode" in str(P)
| DiamondLightSource/ispyb-api | tests/model/test_pdb.py | Python | apache-2.0 | 1,164 |
# -*- coding: utf-8 -*-
import os
import json
import logging
os.environ['GIT_WEBHOOK_CONFIG'] = 'config_test.py'
logging.basicConfig(level=logging.DEBUG)
TEST_DIR = os.path.dirname(__file__)
WEBHOOKDATA_DIR = os.path.join(TEST_DIR, 'webhookdata')
WEBHOOKDATA = {}
for filename in os.listdir(WEBHOOKDATA_DIR):
name = os.path.splitext(filename)[0]
with open(os.path.join(WEBHOOKDATA_DIR, filename)) as f:
data = json.load(f)
WEBHOOKDATA[name] = data
with open(os.path.join(TEST_DIR, '../docker/ssh/id_rsa')) as f:
RSA_PRIVATE_KEY = f.read()
def success(response):
if response.status_code != 200:
print(response.data)
if response.status_code == 200:
data = json.loads(response.data)
return data['success']
return False
def load_data(response):
data = json.loads(response.data)
return data['data']
| hustcc/git_hooks | tests/__init__.py | Python | mit | 868 |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoind with different proxy configuration.
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| ericshawlinux/bitcoin | test/functional/feature_proxy.py | Python | mit | 8,335 |
from __future__ import unicode_literals
import warnings
from django.contrib import admin
from django.contrib.auth import logout
from django.contrib.messages import error
from django.contrib.redirects.models import Redirect
from django.core.exceptions import MiddlewareNotUsed
from django.core.urlresolvers import reverse, resolve
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseGone)
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import Template, RequestContext
from django.utils.cache import get_max_age
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from mezzanine.conf import settings
from mezzanine.core.models import SitePermission
from mezzanine.core.management.commands.createdb import (DEFAULT_USERNAME,
DEFAULT_PASSWORD)
from mezzanine.utils.cache import (cache_key_prefix, nevercache_token,
cache_get, cache_set, cache_installed)
from mezzanine.utils.device import templates_for_device
from mezzanine.utils.deprecation import MiddlewareMixin, get_middleware_setting
from mezzanine.utils.sites import current_site_id, templates_for_host
from mezzanine.utils.urls import next_url
class AdminLoginInterfaceSelectorMiddleware(MiddlewareMixin):
"""
Checks for a POST from the admin login view and if authentication is
successful and the "site" interface is selected, redirect to the site.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
login_type = request.POST.get("mezzanine_login_interface")
if login_type and not request.user.is_authenticated():
response = view_func(request, *view_args, **view_kwargs)
if request.user.is_authenticated():
if login_type == "admin":
next = next_url(request) or request.get_full_path()
username = request.user.get_username()
if (username == DEFAULT_USERNAME and
request.user.check_password(DEFAULT_PASSWORD)):
error(request, mark_safe(_(
"Your account is using the default password, "
"please <a href='%s'>change it</a> immediately.")
% reverse("user_change_password",
args=(request.user.id,))))
else:
next = "/"
return HttpResponseRedirect(next)
else:
return response
return None
class SitePermissionMiddleware(MiddlewareMixin):
"""
Marks the current user with a ``has_site_permission`` which is
used in place of ``user.is_staff`` to achieve per-site staff
access.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
has_site_permission = False
if request.user.is_superuser:
has_site_permission = True
elif request.user.is_staff:
lookup = {"user": request.user, "sites": current_site_id()}
try:
SitePermission.objects.get(**lookup)
except SitePermission.DoesNotExist:
admin_index = reverse("admin:index")
if request.path.startswith(admin_index):
logout(request)
view_func = admin.site.login
extra_context = {"no_site_permission": True}
return view_func(request, extra_context=extra_context)
else:
has_site_permission = True
request.user.has_site_permission = has_site_permission
class TemplateForDeviceMiddleware(MiddlewareMixin):
"""
Inserts device-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
if not isinstance(response.template_name, Template):
templates = templates_for_device(request,
response.template_name)
response.template_name = templates
return response
class TemplateForHostMiddleware(MiddlewareMixin):
"""
Inserts host-specific templates to the template list.
"""
def process_template_response(self, request, response):
if hasattr(response, "template_name"):
if not isinstance(response.template_name, Template):
response.template_name = templates_for_host(
response.template_name)
return response
class UpdateCacheMiddleware(MiddlewareMixin):
"""
Response phase for Mezzanine's cache middleware. Handles caching
the response, and then performing the second phase of rendering,
for content enclosed by the ``nevercache`` tag.
"""
def process_response(self, request, response):
# Caching is only applicable for text-based, non-streaming
# responses. We also skip it for non-200 statuses during
# development, so that stack traces are correctly rendered.
is_text = response.get("content-type", "").startswith("text")
valid_status = response.status_code == 200
streaming = getattr(response, "streaming", False)
if not is_text or streaming or (settings.DEBUG and not valid_status):
return response
# Cache the response if all the required conditions are met.
# Response must be marked for updating by the
# ``FetchFromCacheMiddleware`` having a cache get miss, the
# user must not be authenticated, the HTTP status must be OK
# and the response mustn't include an expiry age, indicating it
# shouldn't be cached.
marked_for_update = getattr(request, "_update_cache", False)
anon = hasattr(request, "user") and not request.user.is_authenticated()
timeout = get_max_age(response)
if timeout is None:
timeout = settings.CACHE_MIDDLEWARE_SECONDS
if anon and valid_status and marked_for_update and timeout:
cache_key = cache_key_prefix(request) + request.get_full_path()
_cache_set = lambda r: cache_set(cache_key, r.content, timeout)
if callable(getattr(response, "render", None)):
response.add_post_render_callback(_cache_set)
else:
_cache_set(response)
# Second phase rendering for non-cached template code and
# content. Split on the delimiter the ``nevercache`` tag
# wrapped its contents in, and render only the content
# enclosed by it, to avoid possible template code injection.
token = nevercache_token()
try:
token = token.encode('utf-8')
except AttributeError:
pass
parts = response.content.split(token)
# Restore csrf token from cookie - check the response
# first as it may be being set for the first time.
csrf_token = None
try:
csrf_token = response.cookies[settings.CSRF_COOKIE_NAME].value
except KeyError:
try:
csrf_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
pass
if csrf_token:
request.META["CSRF_COOKIE"] = csrf_token
context = RequestContext(request)
for i, part in enumerate(parts):
if i % 2:
part = Template(part).render(context).encode("utf-8")
parts[i] = part
response.content = b"".join(parts)
response["Content-Length"] = len(response.content)
if hasattr(request, '_messages'):
# Required to clear out user messages.
request._messages.update(response)
        # The response needs to be run through the CSRF middleware again so
        # that if there was a {% csrf_token %} inside of the nevercache
        # the cookie will be correctly set for the response
csrf_mw_name = "django.middleware.csrf.CsrfViewMiddleware"
if csrf_mw_name in get_middleware_setting():
response.csrf_processing_done = False
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_response(request, response)
return response
class FetchFromCacheMiddleware(MiddlewareMixin):
"""
Request phase for Mezzanine cache middleware. Return a response
    from cache if found, otherwise mark the request for updating
the cache in ``UpdateCacheMiddleware``.
"""
def process_request(self, request):
if (cache_installed() and request.method == "GET" and
not request.user.is_authenticated()):
cache_key = cache_key_prefix(request) + request.get_full_path()
response = cache_get(cache_key)
# We need to force a csrf token here, as new sessions
            # won't receive one on their first request, with cache
# middleware running.
csrf_mw_name = "django.middleware.csrf.CsrfViewMiddleware"
if csrf_mw_name in get_middleware_setting():
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_view(request, lambda x: None, None, None)
get_token(request)
if response is None:
request._update_cache = True
else:
return HttpResponse(response)
class SSLRedirectMiddleware(MiddlewareMixin):
"""
Handles redirections required for SSL when ``SSL_ENABLED`` is ``True``.
    If ``SSL_FORCE_HOST`` is set and is not the current host,
    redirect to it.
    Also ensure URLs defined by ``SSL_FORCE_URL_PREFIXES`` are redirected
    to HTTPS, and redirect all other URLs to HTTP if on HTTPS.
"""
def __init__(self, *args):
warnings.warn(
"SSLRedirectMiddleware is deprecated. See "
"https://docs.djangoproject.com/en/stable/ref/middleware/"
"#module-django.middleware.security for alternative solutions.",
DeprecationWarning)
super(SSLRedirectMiddleware, self).__init__(*args)
def languages(self):
if not hasattr(self, "_languages"):
self._languages = dict(settings.LANGUAGES).keys()
return self._languages
def process_request(self, request):
force_host = settings.SSL_FORCE_HOST
response = None
if force_host and request.get_host().split(":")[0] != force_host:
url = "http://%s%s" % (force_host, request.get_full_path())
response = HttpResponsePermanentRedirect(url)
elif settings.SSL_ENABLED and not settings.DEV_SERVER:
url = "%s%s" % (request.get_host(), request.get_full_path())
path = request.path
if settings.USE_I18N and path[1:3] in self.languages():
path = path[3:]
if path.startswith(settings.SSL_FORCE_URL_PREFIXES):
if not request.is_secure():
response = HttpResponseRedirect("https://%s" % url)
elif request.is_secure() and settings.SSL_FORCED_PREFIXES_ONLY:
response = HttpResponseRedirect("http://%s" % url)
if response and request.method == "POST":
if resolve(request.get_full_path()).url_name == "fb_do_upload":
# The handler for the flash file uploader in filebrowser
# doesn't have access to the http headers Django will use
# to determine whether the request is secure or not, so
# in this case we don't attempt a redirect - note that
# when /admin is restricted to SSL using Mezzanine's SSL
# setup, the flash uploader will post over SSL, so
                # someone would need to explicitly go out of their way to
# trigger this.
return
# Tell the client they need to re-POST.
response.status_code = 307
return response
class RedirectFallbackMiddleware(MiddlewareMixin):
"""
Port of Django's ``RedirectFallbackMiddleware`` that uses
Mezzanine's approach for determining the current site.
"""
def __init__(self, *args, **kwargs):
super(RedirectFallbackMiddleware, self).__init__(*args, **kwargs)
if "django.contrib.redirects" not in settings.INSTALLED_APPS:
raise MiddlewareNotUsed
def process_response(self, request, response):
if response.status_code == 404:
lookup = {
"site_id": current_site_id(),
"old_path": request.get_full_path(),
}
try:
redirect = Redirect.objects.get(**lookup)
except Redirect.DoesNotExist:
pass
else:
if not redirect.new_path:
response = HttpResponseGone()
else:
response = HttpResponsePermanentRedirect(redirect.new_path)
return response
| ryneeverett/mezzanine | mezzanine/core/middleware.py | Python | bsd-2-clause | 13,114 |
# MIT License
#
# Copyright (c) 2018 Jonathan Lorraine, Google LLC
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from .unet import UNet
from .resnet import ResNet18
class CBRStudent(nn.Module):
def __init__(self, num_channels, num_classes):
super(CBRStudent, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(num_channels, 32, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
fcsize = 64 if num_channels == 1 else 256
self.fc_pi = nn.Linear(fcsize, num_classes)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out_pi = self.fc_pi(out)
return out_pi
class UNetTeacher(nn.Module):
def __init__(self, num_channels, args):
super(UNetTeacher, self).__init__()
self.unet = UNet(in_channels=num_channels, n_classes=1, depth=2, wf=3, padding=True,
batch_norm=True, do_noise_channel=False, up_mode='upsample',use_identity_residual=False)
self.bg_weight = args.bg
self.min_std = args.min_std
self.max_std = args.max_std
self.use_exp = args.use_exp
self.dataset = args.dataset
def forward(self, x):
out = self.unet(x).squeeze() # should be of shape N x H x W
# print(out.shape)
        out = F.softmax(out.reshape(x.size(0), -1), dim=1)
out = out.reshape(x.size(0), x.size(2), x.size(3)).unsqueeze(1)
out = out.repeat(1, 2, 1, 1) # shape N x 2 x H x W
meshgrid_x, meshgrid_y = torch.meshgrid(torch.arange(x.size(2)),torch.arange(x.size(3)))
mesh = torch.stack([meshgrid_x, meshgrid_y], dim=0).unsqueeze(0).cuda()
mesh = mesh.repeat(x.size(0), 1,1,1) # shape N x 2 x H x W
mean = torch.sum(out*mesh, dim=[2,3]) # shape N x 2
std = self.min_std
mask = self.bg_weight + torch.exp(torch.sum(-1*(mean.view(-1,2, 1,1) - mesh)**2 / (2*std**2), dim=1))
return mask.unsqueeze(1)
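# Shape walk-through for UNetTeacher.forward above (descriptive note, assuming
# an input batch x of shape N x C x H x W):
#   - the UNet output is softmaxed over all H*W spatial positions, giving one
#     spatial attention distribution per sample;
#   - `mean` is the expected (row, col) coordinate under that distribution,
#     i.e. a soft arg-max of shape N x 2;
#   - the returned mask is a Gaussian bump of fixed width args.min_std centred
#     on that coordinate, offset by the constant background weight args.bg,
#     with final shape N x 1 x H x W.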
class CBRTeacher(nn.Module):
def __init__(self, num_channels):
super(CBRTeacher, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(num_channels, 32, kernel_size=5, stride=2, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2))
fcsize = 64 if num_channels == 1 else 256
self.fc_cent = nn.Linear(fcsize, 2)
self.fc_std = nn.Linear(fcsize, 2)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
mean = x.size(2)//2 + x.size(2)//2*torch.tanh(self.fc_cent(out))
std = 2 + 10*torch.sigmoid(self.fc_std(out))
# print(mean.mean(dim=0), std.mean(dim=0))
meshgrid_x, meshgrid_y = torch.meshgrid(torch.arange(x.size(2)),torch.arange(x.size(3)))
mesh = torch.stack([meshgrid_x, meshgrid_y], dim=0).unsqueeze(0).cuda()
mesh = mesh.repeat(x.size(0), 1,1,1)
mask = 0.5 + torch.exp(torch.sum(-1*(mean.view(-1,2, 1,1) - mesh)**2 / (2*std**2).view(-1,2,1,1), dim=1))
print(mean.mean(), mean.std(),std.mean(), std.std())
return mask.unsqueeze(1).repeat(1, x.size(1), 1, 1)
class GaussianDropout(nn.Module):
def __init__(self, dropout):
super(GaussianDropout, self).__init__()
self.dropout = dropout
def forward(self, x):
"""
Sample noise e ~ N(1, alpha)
Multiply noise h = h_ * e
"""
# N(1, alpha)
if self.training:
dropout = F.sigmoid(self.dropout)
if x.is_cuda:
epsilon = torch.randn(x.size()).cuda() * (dropout / (1 - dropout)) + 1
else:
epsilon = torch.randn(x.size()) * (dropout / (1 - dropout)) + 1
return x * epsilon
else:
'''
epsilon = torch.randn(x.size()).double() * (model.dropout / (1 - model.dropout)) + 1
if x.is_cuda:
epsilon = epsilon.cuda()
return x * epsilon
'''
return x
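# Minimal usage sketch for GaussianDropout (tensor values are assumptions): in
# eval mode the input passes through unchanged; in training mode each element
# is multiplied by noise with mean 1 and standard deviation p / (1 - p), where
# p = sigmoid(dropout), so `dropout` is a raw (possibly learnable) logit rather
# than a probability.
#
#     drop = GaussianDropout(torch.tensor(0.0))   # p = sigmoid(0) = 0.5
#     y = drop(torch.randn(8, 16))                # multiplicative noise applied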
class BernoulliDropout(nn.Module):
def __init__(self, dropout):
super(BernoulliDropout, self).__init__()
self.dropout = dropout
def forward(self, x):
"""
Sample noise e ~ N(1, alpha)
Multiply noise h = h_ * e
"""
temperature = 0.5
# N(1, alpha)
if self.training:
u = Variable(torch.rand(x.size()))
if x.is_cuda:
u = u.cuda()
z = F.sigmoid(self.dropout) + torch.log(u / (1 - u))
a = F.sigmoid(z / temperature)
return x * a
else:
return x
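# Descriptive note for BernoulliDropout: in training mode each element of x is
# scaled by a = sigmoid(z / 0.5) with z = sigmoid(dropout) + log(u / (1 - u))
# and u ~ Uniform(0, 1), i.e. a smooth concrete / Gumbel-sigmoid style
# relaxation of a Bernoulli keep mask that stays differentiable with respect to
# the `dropout` parameter; in eval mode x is returned unchanged.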
class reshape(nn.Module):
def __init__(self, size):
super(reshape, self).__init__()
self.size = size
def forward(self, x):
return x.view(-1, self.size)
class SimpleConvNet(nn.Module):
def __init__(self, batch_norm=True, dropType='bernoulli', conv_drop1=0.0, conv_drop2=0.0, fc_drop=0.0):
super(SimpleConvNet, self).__init__()
self.batch_norm = batch_norm
self.dropType = dropType
if dropType == 'bernoulli':
self.conv1_dropout = nn.Dropout(conv_drop1)
self.conv2_dropout = nn.Dropout(conv_drop2)
self.fc_dropout = nn.Dropout(fc_drop)
elif dropType == 'gaussian':
self.conv1_dropout = GaussianDropout(conv_drop1)
self.conv2_dropout = GaussianDropout(conv_drop2)
self.fc_dropout = GaussianDropout(fc_drop)
if batch_norm:
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
self.conv1_dropout,
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
self.conv2_dropout,
nn.MaxPool2d(2))
else:
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, padding=2),
nn.ReLU(),
self.conv1_dropout,
nn.MaxPool2d(2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, padding=2),
nn.ReLU(),
self.conv2_dropout,
nn.MaxPool2d(2))
self.fc = nn.Linear(7*7*32, 10)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.view(out.size(0), -1)
out = self.fc_dropout(self.fc(out))
return out
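# Shape check for SimpleConvNet (sketch, assuming 1 x 28 x 28 MNIST-style
# inputs): each conv uses kernel 5, stride 1, padding 2 and so preserves H x W,
# while each MaxPool2d(2) halves it, giving 28 -> 14 -> 7.  The flattened
# feature size entering self.fc is therefore 7 * 7 * 32 = 1568.
#
#     net = SimpleConvNet()
#     logits = net(torch.randn(4, 1, 28, 28))   # -> shape (4, 10)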
class CNN(nn.Module):
def __init__(self, num_layers, dropout, size, weight_decay, in_channel, imsize, do_alexnet=False, num_classes=10):
super(CNN, self).__init__()
self.dropout = Variable(torch.FloatTensor([dropout]), requires_grad=True)
self.weight_decay = Variable(torch.FloatTensor([weight_decay]), requires_grad=True)
self.do_alexnet = do_alexnet
self.num_classes = num_classes
self.in_channel = in_channel
self.imsize = imsize
if self.do_alexnet:
self.features = nn.Sequential(
nn.Conv2d(self.in_channel, 64, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(64, 192, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
if imsize == 32:
self.view_size = 256 * 2 * 2
elif imsize == 28:
self.view_size = 256
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.view_size, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, self.num_classes),
)
else:
self.features = nn.Sequential(
nn.Conv2d(self.in_channel, 20, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2),
)
if imsize == 32:
self.view_size = 20 * 8 * 8
elif imsize == 28:
self.view_size = 980
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.view_size, 250),
nn.ReLU(inplace=True),
#nn.Dropout(),
#nn.Linear(250, 250),
#nn.ReLU(inplace=True),
nn.Linear(250, self.num_classes),
)
def do_train(self):
self.features.train()
self.classifier.train()
    def do_eval(self):
        self.features.eval()
        self.classifier.eval()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def L2_loss(self):
loss = 0
for p in self.parameters():
loss += torch.sum(torch.mul(p, p))
return loss * (10 ** self.weight_decay)
def all_L2_loss(self):
loss = 0
count = 0
for p in self.parameters():
#val = torch.flatten(p) - self.weight_decay[count: count + p.numel()]
loss += torch.sum(
torch.mul(torch.exp(self.weight_decay[count: count + p.numel()]), torch.flatten(torch.mul(p, p))))
#loss += 1e-3 * torch.sum(torch.mul(val, val))
count += p.numel()
return loss
class Net(nn.Module):
def __init__(self, num_layers, dropout, size, channel, weight_decay, num_classes=10, do_res=False,
do_classification=True):
super(Net, self).__init__()
self.dropout = Variable(torch.FloatTensor([dropout]), requires_grad=True)
self.weight_decay = Variable(torch.FloatTensor([weight_decay]), requires_grad=True)
self.imsize = size * size * channel
if not do_classification: self.imsize = size * channel
self.do_res = do_res
l_sizes = [self.imsize, self.imsize] + [50] * 20
network = []
# self.Gaussian = BernoulliDropout(self.dropout)
# network.append(nn.Dropout())
for i in range(num_layers):
network.append(nn.Linear(l_sizes[i], l_sizes[i + 1]))
# network.append(self.Gaussian)
network.append(nn.ReLU())
#network.append(nn.Dropout())
network.append(nn.Linear(l_sizes[num_layers], num_classes))
self.net = nn.Sequential(*network)
def forward(self, x):
cur_shape = x.shape
if not self.do_res:
return self.net(x.view(-1, self.imsize))# .reshape(cur_shape)
else:
res = self.net(x.view(-1, self.imsize)).reshape(cur_shape)
return x + res
def do_train(self):
self.net.train()
def do_eval(self):
self.net.eval()
def L2_loss(self):
loss = .0
for p in self.parameters():
loss = loss + torch.sum(torch.mul(p, p)) * torch.exp(self.weight_decay)
return loss
def all_L2_loss(self):
loss = .0
count = 0
for p in self.parameters():
loss = loss + torch.sum(
torch.mul(torch.exp(self.weight_decay[count: count + p.numel()]), torch.flatten(torch.mul(p, p))))
count += p.numel()
return loss
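# Descriptive note on the all_L2_loss methods above: they index
# self.weight_decay[count : count + p.numel()], which assumes the scalar
# created in __init__ has been replaced by a vector holding one log-decay
# entry per model parameter, for example (hypothetical caller code):
#
#     n_params = sum(p.numel() for p in net.parameters())
#     net.weight_decay = torch.zeros(n_params, requires_grad=True)
#
# before all_L2_loss() is called.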
| googleinterns/commentaries | third_party/implicit_hyper_opt/models/simple_models.py | Python | apache-2.0 | 12,560 |
import logging
from pathlib import Path
from pymerra2 import download
# Here we process multiple variables at a time to avoid downloading
# original data twice (all these variables are in the same files).
# These variables names are user choices, their merra-2 equivalent are
# specified below or in the default pymerra2_variables.py
var_names = ["evspsbl", "huss", "prbc", "tas", "sic", "snw", "uas", "vas", "ps"]
var_names = ["hur"]
delete_temp_dir = False
download_dir = Path.cwd().joinpath("downloaded")
merra2_server = "https://goldsmr4.gesdisc.eosdis.nasa.gov/data/"
merra2_server = "https://goldsmr5.gesdisc.eosdis.nasa.gov/data/"
# The variables specification is in the same order as var_names above.
# esdt_dir, collection and merra_name can be found from
# https://gmao.gsfc.nasa.gov/pubs/docs/Bosilovich785.pdf
# https://goldsmr4.gesdisc.eosdis.nasa.gov/data/
# standard_name comes from
# http://cfconventions.org/standard-names.html
# Optionally, if all the variables are already in the default
# pymerra2_variables.py, this can be set to None.
# This loop will create monthly files of hourly MERRA2 data
for yyyy in range(2017, 2019):
for mm in range(1, 13):
try:
download.subdaily_download_and_convert(
merra2_server,
var_names,
merra2_var_dicts=None,
initial_year=yyyy,
final_year=yyyy,
initial_month=mm,
final_month=mm,
initial_day=1,
final_day=None,
output_dir=download_dir,
delete_temp_dir=delete_temp_dir,
)
except Exception as e:
msg = "{}: File not found".format(e)
logging.error(msg)
continue
| bstdenis/pymerra2 | scripts/merra2_subdaily_download.py | Python | apache-2.0 | 1,774 |
#!/usr/bin/env python
# encoding: utf-8
"""
Waf tool for PX4 build
"""
from waflib import Errors, Logs, Task, Utils
from waflib.TaskGen import after_method, before_method, feature
import os
import shutil
import sys
_dynamic_env_data = {}
def _load_dynamic_env_data(bld):
bldnode = bld.bldnode.make_node('modules/PX4Firmware')
for name in ('cxx_flags', 'include_dirs', 'definitions'):
_dynamic_env_data[name] = bldnode.find_node(name).read().split(';')
@feature('px4_ap_library', 'px4_ap_program')
@before_method('process_source')
def px4_dynamic_env(self):
# The generated files from configuration possibly don't exist if it's just
# a list command (TODO: figure out a better way to address that).
if self.bld.cmd == 'list':
return
if not _dynamic_env_data:
_load_dynamic_env_data(self.bld)
self.env.append_value('INCLUDES', _dynamic_env_data['include_dirs'])
self.env.prepend_value('CXXFLAGS', _dynamic_env_data['cxx_flags'])
self.env.prepend_value('CXXFLAGS', _dynamic_env_data['definitions'])
# Single static library
# NOTE: This only works only for local static libraries dependencies - fake
# libraries aren't supported yet
@feature('px4_ap_program')
@after_method('apply_link')
@before_method('process_use')
def px4_import_objects_from_use(self):
queue = list(Utils.to_list(getattr(self, 'use', [])))
names = set()
while queue:
name = queue.pop(0)
if name in names:
continue
names.add(name)
try:
tg = self.bld.get_tgen_by_name(name)
except Errors.WafError:
continue
tg.post()
for t in getattr(tg, 'compiled_tasks', []):
self.link_task.set_inputs(t.outputs)
queue.extend(Utils.to_list(getattr(tg, 'use', [])))
class px4_copy(Task.Task):
color = 'CYAN'
def run(self):
shutil.copy2(self.inputs[0].abspath(), self.outputs[0].abspath())
def keyword(self):
return "PX4: Copying %s to" % self.inputs[0].name
def __str__(self):
return self.outputs[0].path_from(self.generator.bld.bldnode)
class px4_add_git_hashes(Task.Task):
run_str = '${PYTHON} ${PX4_ADD_GIT_HASHES} --ardupilot ${PX4_APM_ROOT} --px4 ${PX4_ROOT} --nuttx ${PX4_NUTTX_ROOT} --uavcan ${PX4_UAVCAN_ROOT} ${SRC} ${TGT}'
color = 'CYAN'
def keyword(self):
return "PX4: Copying firmware and adding git hashes"
def __str__(self):
return self.outputs[0].path_from(self.outputs[0].ctx.launch_node())
def _update_firmware_sig(fw_task, firmware, elf):
original_post_run = fw_task.post_run
def post_run():
original_post_run()
firmware.sig = firmware.cache_sig = Utils.h_file(firmware.abspath())
elf.sig = elf.cache_sig = Utils.h_file(elf.abspath())
fw_task.post_run = post_run
_cp_px4io = None
_firmware_semaphorish_tasks = []
_upload_task = []
@feature('px4_ap_program')
@after_method('process_source')
def px4_firmware(self):
global _cp_px4io, _firmware_semaphorish_tasks, _upload_task
version = self.env.get_flat('PX4_VERSION')
px4 = self.bld.cmake('px4')
px4.vars['APM_PROGRAM_LIB'] = self.link_task.outputs[0].abspath()
if self.env.PX4_USE_PX4IO and not _cp_px4io:
px4io_task = self.create_cmake_build_task('px4', 'fw_io')
if version == '3':
px4io_version = '2'
else:
px4io_version = version
px4io = px4io_task.cmake.bldnode.make_node(
'src/modules/px4iofirmware/px4io-v%s.bin' % px4io_version,
)
px4io_elf = px4.bldnode.make_node(
'src/modules/px4iofirmware/px4io-v%s' % px4io_version
)
px4io_task.set_outputs([px4io, px4io_elf])
romfs = self.bld.bldnode.make_node(self.env.PX4_ROMFS_BLD)
romfs_px4io = romfs.make_node('px4io/px4io.bin')
romfs_px4io.parent.mkdir()
_cp_px4io = self.create_task('px4_copy', px4io, romfs_px4io)
_cp_px4io.keyword = lambda: 'PX4: Copying PX4IO to ROMFS'
px4io_elf_dest = self.bld.bldnode.make_node(self.env.PX4IO_ELF_DEST)
cp_px4io_elf = self.create_task('px4_copy', px4io_elf, px4io_elf_dest)
fw_task = self.create_cmake_build_task(
'px4',
'build_firmware_px4fmu-v%s' % version,
)
fw_task.set_run_after(self.link_task)
# we need to synchronize in order to avoid the output expected by the
    # previous ap_program being overwritten before it is used
for t in _firmware_semaphorish_tasks:
fw_task.set_run_after(t)
_firmware_semaphorish_tasks = []
if self.env.PX4_USE_PX4IO and _cp_px4io.generator is self:
fw_task.set_run_after(_cp_px4io)
firmware = px4.bldnode.make_node(
'src/firmware/nuttx/nuttx-px4fmu-v%s-apm.px4' % version,
)
fw_elf = px4.bldnode.make_node(
'src/firmware/nuttx/firmware_nuttx',
)
_update_firmware_sig(fw_task, firmware, fw_elf)
fw_dest = self.bld.bldnode.make_node(
os.path.join(self.program_dir, '%s.px4' % self.program_name)
)
git_hashes = self.create_task('px4_add_git_hashes', firmware, fw_dest)
git_hashes.set_run_after(fw_task)
_firmware_semaphorish_tasks.append(git_hashes)
fw_elf_dest = self.bld.bldnode.make_node(
os.path.join(self.program_dir, self.program_name)
)
cp_elf = self.create_task('px4_copy', fw_elf, fw_elf_dest)
cp_elf.set_run_after(fw_task)
_firmware_semaphorish_tasks.append(cp_elf)
self.build_summary = dict(
target=self.name,
binary=fw_elf_dest.path_from(self.bld.bldnode),
)
if self.bld.options.upload:
if _upload_task:
Logs.warn('PX4: upload for %s ignored' % self.name)
return
_upload_task = self.create_cmake_build_task('px4', 'upload')
_upload_task.set_run_after(fw_task)
_firmware_semaphorish_tasks.append(_upload_task)
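# Summary of the task graph built by px4_firmware above: the optional PX4IO
# firmware is built and copied into the ROMFS, the main cmake firmware target
# is built after the ap_program link task, the resulting .px4 gets git hashes
# added and the ELF is copied next to it, and an optional upload task runs
# last.  The _firmware_semaphorish_tasks list serialises these steps across
# consecutive ap_programs so one build cannot overwrite outputs another build
# has not consumed yet.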
def _px4_taskgen(bld, **kw):
if 'cls_keyword' in kw and not callable(kw['cls_keyword']):
cls_keyword = str(kw['cls_keyword'])
kw['cls_keyword'] = lambda tsk: 'PX4: ' + cls_keyword
if 'cls_str' in kw and not callable(kw['cls_str']):
cls_str = str(kw['cls_str'])
kw['cls_str'] = lambda tsk: cls_str
kw['color'] = 'CYAN'
return bld(**kw)
@feature('_px4_romfs')
def _process_romfs(self):
bld = self.bld
file_list = (
'firmware/oreoled.bin',
'init.d/rc.APM',
'init.d/rc.error',
'init.d/rcS',
(bld.env.PX4_BOOTLOADER, 'bootloader/fmu_bl.bin'),
)
romfs_src = bld.srcnode.find_dir(bld.env.PX4_ROMFS_SRC)
romfs_bld = bld.bldnode.make_node(bld.env.PX4_ROMFS_BLD)
for item in file_list:
if isinstance(item, str):
src = romfs_src.make_node(item)
dst = romfs_bld.make_node(item)
else:
src = bld.srcnode.make_node(item[0])
dst = romfs_bld.make_node(item[1])
bname = os.path.basename(str(src))
if bname in bld.env.ROMFS_EXCLUDE:
print("Excluding %s" % bname)
continue
dst.parent.mkdir()
self.create_task('px4_copy', src, dst)
def configure(cfg):
cfg.env.CMAKE_MIN_VERSION = '3.2'
cfg.load('cmake')
bldnode = cfg.bldnode.make_node(cfg.variant)
env = cfg.env
env.AP_PROGRAM_FEATURES += ['px4_ap_program']
kw = env.AP_LIBRARIES_OBJECTS_KW
kw['features'] = Utils.to_list(kw.get('features', [])) + ['px4_ap_library']
def srcpath(path):
return cfg.srcnode.make_node(path).abspath()
def bldpath(path):
return bldnode.make_node(path).abspath()
version = env.get_flat('PX4_VERSION')
if env.PX4_VERSION == '1':
bootloader_name = 'px4fmu_bl.bin'
elif env.PX4_VERSION in ['2','3']:
bootloader_name = 'px4fmuv2_bl.bin'
else:
bootloader_name = 'px4fmuv%s_bl.bin' % version
# TODO: we should move stuff from mk/PX4 to Tools/ardupilotwaf/px4 after
    # we stop using the make-based build system
env.PX4_ROMFS_SRC = 'mk/PX4/ROMFS'
env.PX4_ROMFS_BLD = 'px4-extra-files/ROMFS'
env.PX4_BOOTLOADER = 'mk/PX4/bootloader/%s' % bootloader_name
env.PX4_ADD_GIT_HASHES = srcpath('Tools/scripts/add_git_hashes.py')
env.PX4_APM_ROOT = srcpath('')
env.PX4_ROOT = srcpath('modules/PX4Firmware')
env.PX4_NUTTX_ROOT = srcpath('modules/PX4NuttX')
env.PX4_UAVCAN_ROOT = srcpath('modules/uavcan')
if env.PX4_USE_PX4IO:
env.PX4IO_ELF_DEST = 'px4-extra-files/px4io'
nuttx_config='nuttx_px4fmu-v%s_apm' % version
env.PX4_CMAKE_VARS = dict(
CONFIG=nuttx_config,
CMAKE_MODULE_PATH=srcpath('Tools/ardupilotwaf/px4/cmake'),
UAVCAN_LIBUAVCAN_PATH=env.PX4_UAVCAN_ROOT,
NUTTX_SRC=env.PX4_NUTTX_ROOT,
PX4_NUTTX_ROMFS=bldpath(env.PX4_ROMFS_BLD),
ARDUPILOT_BUILD='YES',
EXTRA_CXX_FLAGS=' '.join((
# NOTE: these "-Wno-error=*" flags should be removed as we update
# the submodule
'-Wno-error=double-promotion',
'-Wno-error=reorder',
# NOTE: *Temporarily* using this definition so that both
# PX4Firmware build systems (cmake and legacy make-based) can live
# together
'-DCMAKE_BUILD',
'-DARDUPILOT_BUILD',
'-I%s' % bldpath('libraries/GCS_MAVLink'),
'-I%s' % bldpath('libraries/GCS_MAVLink/include/mavlink'),
'-Wl,--gc-sections',
)),
EXTRA_C_FLAGS=' '.join((
# NOTE: *Temporarily* using this definition so that both
# PX4Firmware build systems (cmake and legacy make-based) can live
# together
'-DCMAKE_BUILD',
)),
)
def build(bld):
version = bld.env.get_flat('PX4_VERSION')
px4 = bld.cmake(
name='px4',
cmake_src=bld.srcnode.find_dir('modules/PX4Firmware'),
cmake_vars=bld.env.PX4_CMAKE_VARS,
)
px4.build(
'msg_gen',
group='dynamic_sources',
cmake_output_patterns='src/modules/uORB/topics/*.h',
)
px4.build(
'prebuild_targets',
group='dynamic_sources',
cmake_output_patterns='px4fmu-v%s/NuttX/nuttx-export/**/*.h' % version,
)
bld(
name='px4_romfs_static_files',
group='dynamic_sources',
features='_px4_romfs',
)
bld.extra_build_summary = _extra_build_summary
def _extra_build_summary(bld, build_summary):
build_summary.text('')
build_summary.text('PX4')
build_summary.text('', '''
The ELF files are pointed by the path in the "%s" column. The .px4 files are in
the same directory of their corresponding ELF files.
''' % build_summary.header_text['target'])
if not bld.options.upload:
build_summary.text('')
build_summary.text('', '''
You can use the option --upload to upload the firmware to the PX4 board if you
have one connected.''')
if bld.env.PX4_USE_PX4IO:
build_summary.text('')
build_summary.text('PX4IO')
summary_data_list = bld.size_summary([bld.env.PX4IO_ELF_DEST])
header = bld.env.BUILD_SUMMARY_HEADER[:]
try:
header.remove('target')
except ValueError:
pass
header.insert(0, 'binary_path')
build_summary.print_table(summary_data_list, header)
| Aerotenna/APM_OcPoC_Zynq | Tools/ardupilotwaf/px4.py | Python | gpl-3.0 | 11,357 |
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded import Dut
@pytest.mark.supported_targets
@pytest.mark.generic
def test_timer_group_example(dut: Dut) -> None:
dut.expect(r'Init timer with auto-reload', timeout=5)
res = dut.expect(r'Timer auto reloaded, count value in ISR: (\d+)', timeout=5)
reloaded_count = res.group(1).decode('utf8')
assert 0 <= int(reloaded_count) < 10
alarm_increase_step = 500000
dut.expect(r'Init timer without auto-reload')
for i in range(1, 5):
res = dut.expect(r'Timer alarmed at (\d+)', timeout=3)
alarm_count = res.group(1).decode('utf8')
assert (i * alarm_increase_step - 10) < int(alarm_count) < (i * alarm_increase_step + 10)
| espressif/esp-idf | examples/peripherals/timer_group/legacy_driver/pytest_timer_group.py | Python | apache-2.0 | 808 |
#
# kickstart.py : Apply kickstart configuration to a system
#
# Copyright 2007, Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import os.path
import shutil
import subprocess
import time
import logging
import urlgrabber
import selinux
try:
import system_config_keyboard.keyboard as keyboard
except ImportError:
import rhpl.keyboard as keyboard
import pykickstart.commands as kscommands
import pykickstart.constants as ksconstants
import pykickstart.errors as kserrors
import pykickstart.parser as ksparser
import pykickstart.version as ksversion
import imgcreate.errors as errors
import imgcreate.fs as fs
def read_kickstart(path):
"""Parse a kickstart file and return a KickstartParser instance.
This is a simple utility function which takes a path to a kickstart file,
parses it and returns a pykickstart KickstartParser instance which can
be then passed to an ImageCreator constructor.
If an error occurs, a CreatorError exception is thrown.
"""
version = ksversion.makeVersion()
ks = ksparser.KickstartParser(version)
try:
ksfile = urlgrabber.urlgrab(path)
ks.readKickstart(ksfile)
    # Fallback to e.args[0] is a workaround for bugs in urlgrabber and pykickstart.
except IOError, e:
raise errors.KickstartError("Failed to read kickstart file "
"'%s' : %s" % (path, e.strerror or
e.args[0]))
except kserrors.KickstartError, e:
raise errors.KickstartError("Failed to parse kickstart file "
"'%s' : %s" % (path, e))
return ks
def build_name(kscfg, prefix = None, suffix = None, maxlen = None):
"""Construct and return an image name string.
This is a utility function to help create sensible name and fslabel
strings. The name is constructed using the sans-prefix-and-extension
kickstart filename and the supplied prefix and suffix.
If the name exceeds the maxlen length supplied, the prefix is first dropped
and then the kickstart filename portion is reduced until it fits. In other
words, the suffix takes precedence over the kickstart portion and the
kickstart portion takes precedence over the prefix.
kscfg -- a path to a kickstart file
prefix -- a prefix to prepend to the name; defaults to None, which causes
no prefix to be used
suffix -- a suffix to append to the name; defaults to None, which causes
a YYYYMMDDHHMM suffix to be used
maxlen -- the maximum length for the returned string; defaults to None,
which means there is no restriction on the name length
    Note, if maxlen is less than len(suffix), you get to keep both pieces.
"""
name = os.path.basename(kscfg)
idx = name.rfind('.')
if idx >= 0:
name = name[:idx]
if prefix is None:
prefix = ""
if suffix is None:
suffix = time.strftime("%Y%m%d%H%M")
if name.startswith(prefix):
name = name[len(prefix):]
ret = prefix + name + "-" + suffix
if not maxlen is None and len(ret) > maxlen:
ret = name[:maxlen - len(suffix) - 1] + "-" + suffix
return ret
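# Worked example (hedged; the file name and timestamp are assumptions):
#
#     build_name("/kickstarts/fedora-live.ks", prefix="livecd-",
#                suffix="201801011200")
#
# strips the ".ks" extension, leaves the base name alone (it does not start
# with the prefix) and returns "livecd-fedora-live-201801011200".  When maxlen
# is exceeded, the prefix is dropped first and the kickstart portion is then
# truncated until the result fits.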
class KickstartConfig(object):
"""A base class for applying kickstart configurations to a system."""
def __init__(self, instroot):
self.instroot = instroot
def path(self, subpath):
return self.instroot + subpath
def chroot(self):
os.chroot(self.instroot)
os.chdir("/")
def call(self, args):
if not os.path.exists("%s/%s" %(self.instroot, args[0])):
raise errors.KickstartError("Unable to run %s!" %(args))
subprocess.call(args, preexec_fn = self.chroot)
def apply(self):
pass
class LanguageConfig(KickstartConfig):
"""A class to apply a kickstart language configuration to a system."""
def apply(self, kslang):
lang = kslang.lang or "en_US.UTF-8"
f = open(self.path("/etc/locale.conf"), "w+")
f.write("LANG=\"" + lang + "\"\n")
f.close()
class KeyboardConfig(KickstartConfig):
"""A class to apply a kickstart keyboard configuration to a system."""
def apply(self, kskeyboard):
vcconf_file = self.path("/etc/vconsole.conf")
DEFAULT_VC_FONT = "latarcyrheb-sun16"
if not kskeyboard.keyboard:
kskeyboard.keyboard = "us"
try:
with open(vcconf_file, "w") as f:
f.write('KEYMAP="%s"\n' % kskeyboard.keyboard)
# systemd now defaults to a font that cannot display non-ascii
# characters, so we have to tell it to use a better one
f.write('FONT="%s"\n' % DEFAULT_VC_FONT)
except IOError as e:
logging.error("Cannot write vconsole configuration file: %s" % e)
class TimezoneConfig(KickstartConfig):
"""A class to apply a kickstart timezone configuration to a system."""
def apply(self, kstimezone):
tz = kstimezone.timezone or "America/New_York"
utc = str(kstimezone.isUtc)
# /etc/localtime is a symlink with glibc > 2.15-41
# but if it exists as a file keep it as a file and fall back
# to a symlink.
localtime = self.path("/etc/localtime")
if os.path.isfile(localtime) and \
not os.path.islink(localtime):
try:
shutil.copy2(self.path("/usr/share/zoneinfo/%s" %(tz,)),
localtime)
except (OSError, shutil.Error) as e:
logging.error("Error copying timezone: %s" %(e.strerror,))
else:
if os.path.exists(localtime):
os.unlink(localtime)
os.symlink("/usr/share/zoneinfo/%s" %(tz,), localtime)
class AuthConfig(KickstartConfig):
"""A class to apply a kickstart authconfig configuration to a system."""
def apply(self, ksauthconfig):
if not os.path.exists(self.path("/usr/sbin/authconfig")):
return
auth = ksauthconfig.authconfig or "--useshadow --enablemd5"
args = ["/usr/sbin/authconfig", "--update", "--nostart"]
self.call(args + auth.split())
class FirewallConfig(KickstartConfig):
"""A class to apply a kickstart firewall configuration to a system."""
def apply(self, ksfirewall):
args = ["/usr/bin/firewall-offline-cmd"]
# enabled is None if neither --enable or --disable is passed
# default to enabled if nothing has been set.
if ksfirewall.enabled == False:
args += ["--disabled"]
else:
args += ["--enabled"]
for dev in ksfirewall.trusts:
args += [ "--trust=%s" % (dev,) ]
for port in ksfirewall.ports:
args += [ "--port=%s" % (port,) ]
for service in ksfirewall.services:
args += [ "--service=%s" % (service,) ]
self.call(args)
class RootPasswordConfig(KickstartConfig):
"""A class to apply a kickstart root password configuration to a system."""
def lock(self):
self.call(["/usr/bin/passwd", "-l", "root"])
def set_encrypted(self, password):
self.call(["/usr/sbin/usermod", "-p", password, "root"])
def set_unencrypted(self, password):
for p in ("/bin/echo", "/usr/bin/passwd"):
if not os.path.exists("%s/%s" %(self.instroot, p)):
raise errors.KickstartError("Unable to set unencrypted password due to lack of %s" % p)
p1 = subprocess.Popen(["/bin/echo", password],
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2 = subprocess.Popen(["/usr/bin/passwd", "--stdin", "root"],
stdin = p1.stdout,
stdout = subprocess.PIPE,
preexec_fn = self.chroot)
p2.communicate()
def apply(self, ksrootpw):
if ksrootpw.isCrypted:
self.set_encrypted(ksrootpw.password)
elif ksrootpw.password != "":
self.set_unencrypted(ksrootpw.password)
if ksrootpw.lock:
self.lock()
class ServicesConfig(KickstartConfig):
"""A class to apply a kickstart services configuration to a system."""
def apply(self, ksservices):
if not os.path.exists(self.path("/sbin/chkconfig")):
return
for s in ksservices.enabled:
self.call(["/sbin/chkconfig", s, "on"])
for s in ksservices.disabled:
self.call(["/sbin/chkconfig", s, "off"])
class XConfig(KickstartConfig):
"""A class to apply a kickstart X configuration to a system."""
RUNLEVELS = {3: 'multi-user.target', 5: 'graphical.target'}
def apply(self, ksxconfig):
if ksxconfig.defaultdesktop:
f = open(self.path("/etc/sysconfig/desktop"), "w")
f.write("DESKTOP="+ksxconfig.defaultdesktop+"\n")
f.close()
if ksxconfig.startX:
if not os.path.isdir(self.path('/etc/systemd/system')):
logging.warning("there is no /etc/systemd/system directory, cannot update default.target!")
return
default_target = self.path('/etc/systemd/system/default.target')
if os.path.islink(default_target):
os.unlink(default_target)
os.symlink('/lib/systemd/system/graphical.target', default_target)
class RPMMacroConfig(KickstartConfig):
"""A class to apply the specified rpm macros to the filesystem"""
def apply(self, ks):
if not ks:
return
f = open(self.path("/etc/rpm/macros.imgcreate"), "w+")
if exclude_docs(ks):
f.write("%_excludedocs 1\n")
if not selinux_enabled(ks):
f.write("%__file_context_path %{nil}\n")
        if inst_langs(ks) is not None:
f.write("%_install_langs ")
f.write(inst_langs(ks))
f.write("\n")
f.close()
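    # For reference (hedged sketch): with a kickstart that excludes docs,
    # disables SELinux and restricts install languages to "en_US", the file
    # written above would contain roughly:
    #
    #     %_excludedocs 1
    #     %__file_context_path %{nil}
    #     %_install_langs en_US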
class NetworkConfig(KickstartConfig):
"""A class to apply a kickstart network configuration to a system."""
def write_ifcfg(self, network):
p = self.path("/etc/sysconfig/network-scripts/ifcfg-" + network.device)
f = file(p, "w+")
os.chmod(p, 0644)
f.write("DEVICE=%s\n" % network.device)
f.write("BOOTPROTO=%s\n" % network.bootProto)
if network.bootProto.lower() == "static":
if network.ip:
f.write("IPADDR=%s\n" % network.ip)
if network.netmask:
f.write("NETMASK=%s\n" % network.netmask)
if network.onboot:
f.write("ONBOOT=on\n")
else:
f.write("ONBOOT=off\n")
if network.essid:
f.write("ESSID=%s\n" % network.essid)
if network.ethtool:
if network.ethtool.find("autoneg") == -1:
network.ethtool = "autoneg off " + network.ethtool
f.write("ETHTOOL_OPTS=%s\n" % network.ethtool)
if network.bootProto.lower() == "dhcp":
if network.hostname:
f.write("DHCP_HOSTNAME=%s\n" % network.hostname)
if network.dhcpclass:
f.write("DHCP_CLASSID=%s\n" % network.dhcpclass)
if network.mtu:
f.write("MTU=%s\n" % network.mtu)
f.close()
def write_wepkey(self, network):
if not network.wepkey:
return
p = self.path("/etc/sysconfig/network-scripts/keys-" + network.device)
f = file(p, "w+")
os.chmod(p, 0600)
f.write("KEY=%s\n" % network.wepkey)
f.close()
def write_sysconfig(self, useipv6, hostname, gateway):
path = self.path("/etc/sysconfig/network")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("NETWORKING=yes\n")
if useipv6:
f.write("NETWORKING_IPV6=yes\n")
else:
f.write("NETWORKING_IPV6=no\n")
if gateway:
f.write("GATEWAY=%s\n" % gateway)
f.close()
def write_hosts(self, hostname):
localline = ""
if hostname and hostname != "localhost.localdomain":
localline += hostname + " "
l = hostname.split(".")
if len(l) > 1:
localline += l[0] + " "
localline += "localhost.localdomain localhost"
path = self.path("/etc/hosts")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("127.0.0.1\t\t%s\n" % localline)
f.write("::1\t\tlocalhost6.localdomain6 localhost6\n")
f.close()
def write_hostname(self, hostname):
if not hostname:
return
path = self.path("/etc/hostname")
f = file(path, "w+")
os.chmod(path, 0644)
f.write("%s\n" % (hostname,))
f.close()
def write_resolv(self, nodns, nameservers):
if nodns or not nameservers:
return
path = self.path("/etc/resolv.conf")
f = file(path, "w+")
os.chmod(path, 0644)
for ns in (nameservers):
if ns:
f.write("nameserver %s\n" % ns)
f.close()
def apply(self, ksnet):
fs.makedirs(self.path("/etc/sysconfig/network-scripts"))
useipv6 = False
nodns = False
hostname = None
gateway = None
nameservers = None
for network in ksnet.network:
if not network.device:
raise errors.KickstartError("No --device specified with "
"network kickstart command")
if (network.onboot and network.bootProto.lower() != "dhcp" and
not (network.ip and network.netmask)):
raise errors.KickstartError("No IP address and/or netmask "
"specified with static "
"configuration for '%s'" %
network.device)
self.write_ifcfg(network)
self.write_wepkey(network)
if network.ipv6:
useipv6 = True
if network.nodns:
nodns = True
if network.hostname:
hostname = network.hostname
if network.gateway:
gateway = network.gateway
if network.nameserver:
nameservers = network.nameserver.split(",")
self.write_sysconfig(useipv6, hostname, gateway)
self.write_hosts(hostname)
self.write_hostname(hostname)
self.write_resolv(nodns, nameservers)
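    # Example (hedged sketch): a kickstart line such as
    #     network --device=eth0 --bootproto=dhcp --onboot=yes --hostname=box.example.com
    # results in /etc/sysconfig/network-scripts/ifcfg-eth0 containing roughly:
    #     DEVICE=eth0
    #     BOOTPROTO=dhcp
    #     ONBOOT=on
    #     DHCP_HOSTNAME=box.example.com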
class SelinuxConfig(KickstartConfig):
"""A class to apply a kickstart selinux configuration to a system."""
def relabel(self, ksselinux):
# touch some files which get unhappy if they're not labeled correctly
for fn in ("/etc/resolv.conf",):
path = self.path(fn)
f = file(path, "w+")
os.chmod(path, 0644)
f.close()
if ksselinux.selinux == ksconstants.SELINUX_DISABLED:
return
if not os.path.exists(self.path("/sbin/setfiles")):
return
self.call(["/sbin/setfiles", "-F", "-p", "-e", "/proc", "-e", "/sys", "-e", "/dev", "/etc/selinux/clip/contexts/files/file_contexts", "/"])
def apply(self, ksselinux):
selinux_config = "/etc/selinux/config"
if not os.path.exists(self.instroot+selinux_config):
return
if ksselinux.selinux == ksconstants.SELINUX_ENFORCING:
cmd = "SELINUX=enforcing\n"
elif ksselinux.selinux == ksconstants.SELINUX_PERMISSIVE:
cmd = "SELINUX=permissive\n"
elif ksselinux.selinux == ksconstants.SELINUX_DISABLED:
cmd = "SELINUX=disabled\n"
else:
return
# Replace the SELINUX line in the config
lines = open(self.instroot+selinux_config).readlines()
with open(self.instroot+selinux_config, "w") as f:
for line in lines:
if line.startswith("SELINUX="):
f.write(cmd)
else:
f.write(line)
self.relabel(ksselinux)
def get_image_size(ks, default = None):
__size = 0
for p in ks.handler.partition.partitions:
if p.mountpoint == "/" and p.size:
__size = p.size
if __size > 0:
return int(__size) * 1024L * 1024L
else:
return default
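# Example (sketch): a kickstart line such as ``part / --size 4096`` makes
# get_image_size() return 4096 * 1024 * 1024 bytes; when no root partition
# with a size is present, the supplied ``default`` is returned instead.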
def get_image_fstype(ks, default = None):
for p in ks.handler.partition.partitions:
if p.mountpoint == "/" and p.fstype:
return p.fstype
return default
def get_modules(ks):
devices = []
if not hasattr(ks.handler.device, "deviceList"):
devices.append(ks.handler.device)
else:
devices.extend(ks.handler.device.deviceList)
modules = []
for device in devices:
if not device.moduleName:
continue
modules.extend(device.moduleName.split(":"))
return modules
def get_timeout(ks, default = None):
if not hasattr(ks.handler.bootloader, "timeout"):
return default
if ks.handler.bootloader.timeout is None:
return default
return int(ks.handler.bootloader.timeout)
# Drop quiet since we don't want noisy boot info
def get_kernel_args(ks, default = "ro rd.live.image rd.live.ram"):
if not hasattr(ks.handler.bootloader, "appendLine"):
return default
if ks.handler.bootloader.appendLine is None:
return default
return "%s %s" %(default, ks.handler.bootloader.appendLine)
def get_default_kernel(ks, default = None):
if not hasattr(ks.handler.bootloader, "default"):
return default
if not ks.handler.bootloader.default:
return default
return ks.handler.bootloader.default
def get_repos(ks, repo_urls = {}):
repos = {}
for repo in ks.handler.repo.repoList:
inc = []
if hasattr(repo, "includepkgs"):
inc.extend(repo.includepkgs)
exc = []
if hasattr(repo, "excludepkgs"):
exc.extend(repo.excludepkgs)
baseurl = repo.baseurl
mirrorlist = repo.mirrorlist
proxy = repo.proxy
sslverify = not repo.noverifyssl
if repo.name in repo_urls:
baseurl = repo_urls[repo.name]
mirrorlist = None
        if repo.name in repos:
logging.warn("Overriding already specified repo %s" %(repo.name,))
repos[repo.name] = (repo.name, baseurl, mirrorlist, proxy, inc, exc, repo.cost, sslverify)
return repos.values()
def convert_method_to_repo(ks):
try:
ks.handler.repo.methodToRepo()
except (AttributeError, kserrors.KickstartError):
pass
def get_packages(ks, required = []):
return ks.handler.packages.packageList + required
def get_groups(ks, required = []):
return ks.handler.packages.groupList + required
def get_excluded(ks, required = []):
return ks.handler.packages.excludedList + required
def get_partitions(ks, required = []):
return ks.handler.partition.partitions
def ignore_missing(ks):
return ks.handler.packages.handleMissing == ksconstants.KS_MISSING_IGNORE
def exclude_docs(ks):
return ks.handler.packages.excludeDocs
def inst_langs(ks):
if hasattr(ks.handler.packages, "instLange"):
return ks.handler.packages.instLange
elif hasattr(ks.handler.packages, "instLangs"):
return ks.handler.packages.instLangs
return ""
def get_pre_scripts(ks):
scripts = []
for s in ks.handler.scripts:
if s.type != ksparser.KS_SCRIPT_PRE:
continue
scripts.append(s)
return scripts
def get_post_scripts(ks):
scripts = []
for s in ks.handler.scripts:
if s.type != ksparser.KS_SCRIPT_POST:
continue
scripts.append(s)
return scripts
def selinux_enabled(ks):
return ks.handler.selinux.selinux in (ksconstants.SELINUX_ENFORCING,
ksconstants.SELINUX_PERMISSIVE)
| ykhodorkovskiy/clip | packages/livecd-tools/livecd-tools-20.6/imgcreate/kickstart.py | Python | apache-2.0 | 20,666 |
"""The Environment Canada (EC) component."""
from functools import partial
import logging
from env_canada import ECData, ECRadar
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from .const import CONF_LANGUAGE, CONF_STATION, DOMAIN
PLATFORMS = ["camera", "sensor", "weather"]
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry):
"""Set up EC as config entry."""
lat = config_entry.data.get(CONF_LATITUDE)
lon = config_entry.data.get(CONF_LONGITUDE)
station = config_entry.data.get(CONF_STATION)
lang = config_entry.data.get(CONF_LANGUAGE, "English")
weather_api = {}
weather_init = partial(
ECData, station_id=station, coordinates=(lat, lon), language=lang.lower()
)
weather_data = await hass.async_add_executor_job(weather_init)
weather_api["weather_data"] = weather_data
radar_init = partial(ECRadar, coordinates=(lat, lon))
radar_data = await hass.async_add_executor_job(radar_init)
weather_api["radar_data"] = radar_data
await hass.async_add_executor_job(radar_data.get_loop)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = weather_api
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
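# Descriptive note: the dict stored in hass.data[DOMAIN][entry_id] above holds
# the shared ECData ("weather_data") and ECRadar ("radar_data") instances that
# the camera, sensor and weather platforms read when they are set up for this
# config entry.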
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
def trigger_import(hass, config):
"""Trigger a import of YAML config into a config_entry."""
_LOGGER.warning(
"Environment Canada YAML configuration is deprecated; your YAML configuration "
"has been imported into the UI and can be safely removed"
)
if not config.get(CONF_LANGUAGE):
config[CONF_LANGUAGE] = "English"
data = {}
for key in (
CONF_STATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_LANGUAGE,
): # pylint: disable=consider-using-tuple
if config.get(key):
data[key] = config[key]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=data
)
)
| lukas-hetzenecker/home-assistant | homeassistant/components/environment_canada/__init__.py | Python | apache-2.0 | 2,354 |
# -*- coding: utf-8 -*-
from datetime import timedelta
import logging
import re
from django.utils import timezone
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.db.utils import IntegrityError
from activeusers import utils
from activeusers.models import Visitor
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
class MiddlewareMixin(object):
pass
title_re = re.compile('<title>(.*?)</title>')
log = logging.getLogger('activeusers.middleware')
class VisitorTrackingMiddleware(MiddlewareMixin):
"""
Keeps track of your active users. Anytime a visitor accesses a valid URL,
their unique record will be updated with the page they're on and the last
time they requested a page.
Records are considered to be unique when the session key and IP address
are unique together. Sometimes the same user used to have two different
records, so I added a check to see if the session key had changed for the
same IP and user agent in the last 5 minutes
"""
def process_request(self, request):
# don't process AJAX requests
if request.is_ajax():
return
# create some useful variables
ip_address = utils.get_ip(request)
user_agent = request.META.get('HTTP_USER_AGENT', '')[:255]
if hasattr(request, 'session'):
# use the current session key if we can
session_key = request.session.session_key
if session_key is None:
# The session must not be stored yet.
# We will wait until we have a session_key on the next
# request. This has a nice side-effect of not attempting
# to track hit-and-runners like spiders and bots.
return
else:
# otherwise just fake a session key
session_key = '%s:%s' % (ip_address, user_agent)
prefixes = getattr(settings, 'ACTIVEUSERS_IGNORE_PREFIXES', [])
# ensure that the request.path does not begin with any of the prefixes
for prefix in prefixes:
if request.path.startswith(prefix):
log.debug('Not tracking request to: %s' % request.path)
return
# if we get here, the URL needs to be tracked
# determine what time it is
now = timezone.now()
attrs = {
'session_key': session_key,
'ip_address': ip_address
}
# for some reason, Visitor.objects.get_or_create was not working here
try:
visitor = Visitor.objects.get(**attrs)
except Visitor.DoesNotExist:
# see if there's a visitor with the same IP and user agent
# within the last 5 minutes
cutoff = now - timedelta(minutes=5)
visitors = Visitor.objects.filter(
ip_address=ip_address,
user_agent=user_agent,
last_update__gte=cutoff
)
if len(visitors):
visitor = visitors[0]
visitor.session_key = session_key
log.debug('Using existing visitor for IP %s / UA %s: %s'
% (ip_address, user_agent, visitor.id))
else:
# it's probably safe to assume that the visitor is brand new
visitor = Visitor(**attrs)
log.debug('Created a new visitor: %s' % attrs)
        except Exception:
return
# determine whether or not the user is logged in
user = request.user
if isinstance(user, AnonymousUser):
user = None
# update the tracking information
visitor.user = user
visitor.user_agent = user_agent
# if the visitor record is new, or the visitor hasn't been here for
# at least an hour, update their referrer URL
one_hour_ago = now - timedelta(hours=1)
if not visitor.last_update or visitor.last_update <= one_hour_ago:
referrer = request.META.get('HTTP_REFERER', 'unknown')
visitor.referrer = utils.u_clean(referrer[:255])
# reset the number of pages they've been to
visitor.page_views = 0
visitor.session_start = now
visitor.url = request.path
visitor.page_views += 1
visitor.last_update = now
try:
visitor.save()
except IntegrityError:
# If we received an IntegrityError on ``session_key`` not unique,
# it's probably because Django tried to do an INSERT, but another
# request from the same User was able to INSERT ahead of us.
# Try again with an UPDATE query.
visitor.id = Visitor.objects.get(session_key=visitor.session_key).id
visitor.save(force_update=True)
class VisitorCleanUpMiddleware:
"""Clean up old visitor tracking records in the database"""
def process_request(self, request):
last_clean_time = cache.get('activeusers_last_cleanup')
now = timezone.now()
x_minutes_ago = now - timedelta(minutes=int(utils.get_timeout()) / 2)
if not last_clean_time or last_clean_time <= x_minutes_ago:
cache.set('activeusers_last_cleanup', now)
timeout = utils.get_cleanup_timeout()
if str(timeout).isdigit():
timeout = timezone.now() - timedelta(hours=int(timeout))
Visitor.objects.filter(last_update__lte=timeout).delete()
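# Hedged configuration sketch (the dotted paths and values are assumptions):
# VisitorTrackingMiddleware is enabled through the middleware settings, and
# ACTIVEUSERS_IGNORE_PREFIXES, read in process_request above, lists URL
# prefixes that should never be tracked:
#
#     MIDDLEWARE = [
#         ...,
#         "activeusers.middleware.VisitorTrackingMiddleware",
#     ]
#     ACTIVEUSERS_IGNORE_PREFIXES = ["/static/", "/media/"]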
| arteria/django-activeusers | activeusers/middleware.py | Python | mit | 5,591 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os
import time
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str
from desktop.lib.parameterization import find_variables
from desktop.models import Document2
from hadoop import cluster
from hadoop.fs.hadoopfs import Hdfs
from liboozie.oozie_api import get_oozie
from liboozie.conf import REMOTE_DEPLOYMENT_DIR
from liboozie.credentials import Credentials
LOG = logging.getLogger(__name__)
def submit_dryrun(run_func):
def decorate(self, deployment_dir=None):
if self.oozie_id is not None:
raise Exception(_("Submission already submitted (Oozie job id %s)") % (self.oozie_id,))
jt_address = cluster.get_cluster_addr_for_job_submission()
if deployment_dir is None:
      self._update_properties(jt_address) # Needed to set properties such as Credentials before deploying
deployment_dir = self.deploy()
self._update_properties(jt_address, deployment_dir)
if self.properties.get('dryrun'):
self.api.dryrun(self.properties)
return run_func(self, deployment_dir)
return wraps(run_func)(decorate)
class Submission(object):
"""
Represents one unique Oozie submission.
Actions are:
- submit
- rerun
"""
def __init__(self, user, job=None, fs=None, jt=None, properties=None, oozie_id=None):
self.job = job
self.user = user
self.fs = fs
self.jt = jt # Deprecated with YARN, we now use logical names only for RM
self.oozie_id = oozie_id
self.api = get_oozie(self.user)
if properties is not None:
self.properties = properties
else:
self.properties = {}
self.properties['security_enabled'] = self.api.security_enabled
def __str__(self):
if self.oozie_id:
res = "Submission for job '%s'." % (self.oozie_id,)
else:
res = "Submission for job '%s' (id %s, owner %s)." % (self.job.name, self.job.id, self.user)
if self.oozie_id:
res += " -- " + self.oozie_id
return res
@submit_dryrun
def run(self, deployment_dir=None):
"""
Take care of all the actions of submitting a Oozie workflow.
Returns the oozie job id if all goes well.
"""
self.oozie_id = self.api.submit_job(self.properties)
LOG.info("Submitted: %s" % (self,))
if self._is_workflow():
self.api.job_control(self.oozie_id, 'start')
LOG.info("Started: %s" % (self,))
return self.oozie_id
def rerun(self, deployment_dir, fail_nodes=None, skip_nodes=None):
jt_address = cluster.get_cluster_addr_for_job_submission()
self._update_properties(jt_address, deployment_dir)
self.properties.update({'oozie.wf.application.path': deployment_dir})
if fail_nodes:
self.properties.update({'oozie.wf.rerun.failnodes': fail_nodes})
elif not skip_nodes:
self.properties.update({'oozie.wf.rerun.failnodes': 'false'}) # Case empty 'skip_nodes' list
else:
self.properties.update({'oozie.wf.rerun.skip.nodes': skip_nodes})
self.api.rerun(self.oozie_id, properties=self.properties)
LOG.info("Rerun: %s" % (self,))
return self.oozie_id
def rerun_coord(self, deployment_dir, params):
jt_address = cluster.get_cluster_addr_for_job_submission()
self._update_properties(jt_address, deployment_dir)
self.properties.update({'oozie.coord.application.path': deployment_dir})
self.api.job_control(self.oozie_id, action='coord-rerun', properties=self.properties, parameters=params)
LOG.info("Rerun: %s" % (self,))
return self.oozie_id
def update_coord(self):
self.api = get_oozie(self.user, api_version="v2")
self.api.job_control(self.oozie_id, action='update', properties=self.properties, parameters=None)
LOG.info("Update: %s" % (self,))
return self.oozie_id
def rerun_bundle(self, deployment_dir, params):
jt_address = cluster.get_cluster_addr_for_job_submission()
self._update_properties(jt_address, deployment_dir)
self.properties.update({'oozie.bundle.application.path': deployment_dir})
self.api.job_control(self.oozie_id, action='bundle-rerun', properties=self.properties, parameters=params)
LOG.info("Rerun: %s" % (self,))
return self.oozie_id
def deploy(self):
try:
deployment_dir = self._create_deployment_dir()
except Exception, ex:
msg = _("Failed to create deployment directory: %s" % ex)
LOG.exception(msg)
raise PopupException(message=msg, detail=str(ex))
if self.api.security_enabled:
jt_address = cluster.get_cluster_addr_for_job_submission()
self._update_properties(jt_address) # Needed for coordinator deploying workflows with credentials
if hasattr(self.job, 'nodes'):
for action in self.job.nodes:
# Make sure XML is there
# Don't support more than one level sub-workflow
if action.data['type'] == 'subworkflow':
from oozie.models2 import Workflow
workflow = Workflow(document=Document2.objects.get(uuid=action.data['properties']['workflow']))
sub_deploy = Submission(self.user, workflow, self.fs, self.jt, self.properties)
workspace = sub_deploy.deploy()
self.job.override_subworkflow_id(action, workflow.id) # For displaying the correct graph
self.properties['workspace_%s' % workflow.uuid] = workspace # For pointing to the correct workspace
oozie_xml = self.job.to_xml(self.properties)
self._do_as(self.user.username, self._copy_files, deployment_dir, oozie_xml, self.properties)
return deployment_dir
def get_external_parameters(self, application_path):
"""From XML and job.properties HDFS files"""
deployment_dir = os.path.dirname(application_path)
xml = self.fs.do_as_user(self.user, self.fs.read, application_path, 0, 1 * 1024**2)
properties_file = deployment_dir + '/job.properties'
if self.fs.do_as_user(self.user, self.fs.exists, properties_file):
properties = self.fs.do_as_user(self.user, self.fs.read, properties_file, 0, 1 * 1024**2)
else:
properties = None
return self._get_external_parameters(xml, properties)
def _get_external_parameters(self, xml, properties=None):
from oozie.models import DATASET_FREQUENCY
parameters = dict([(var, '') for var in find_variables(xml, include_named=False) if not self._is_coordinator() or var not in DATASET_FREQUENCY])
if properties:
parameters.update(dict([line.strip().split('=')
for line in properties.split('\n') if not line.startswith('#') and len(line.strip().split('=')) == 2]))
return parameters
def _update_properties(self, jobtracker_addr, deployment_dir=None):
LOG.info('Using FS %s and JT %s' % (self.fs, self.jt))
if self.jt and self.jt.logical_name:
jobtracker_addr = self.jt.logical_name
if self.fs.logical_name:
fs_defaultfs = self.fs.logical_name
else:
fs_defaultfs = self.fs.fs_defaultfs
self.properties.update({
'jobTracker': jobtracker_addr,
'nameNode': fs_defaultfs,
})
if self.job and deployment_dir:
self.properties.update({
self.job.PROPERTY_APP_PATH: self.fs.get_hdfs_path(deployment_dir),
self.job.HUE_ID: self.job.id
})
# Generate credentials when using security
if self.api.security_enabled:
credentials = Credentials()
credentials.fetch(self.api)
self.properties['credentials'] = credentials.get_properties()
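  # Illustrative sketch of the result (concrete values are assumptions): after
  # _update_properties the submission properties contain at least
  #
  #   jobTracker               -> RM address or logical name (e.g. 'yarnRM')
  #   nameNode                 -> fs.defaultfs or the HDFS logical name
  #   <job.PROPERTY_APP_PATH>  -> HDFS path of the deployment directory
  #   <job.HUE_ID>             -> id of the Hue document being submitted
  #   security_enabled         -> whether the Oozie server expects credentials
  #
  # plus a 'credentials' entry when security is enabled.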
def _create_deployment_dir(self):
"""
Return the job deployment directory in HDFS, creating it if necessary.
The actual deployment dir should be 0711 owned by the user
"""
# Automatic setup of the required directories if needed
create_directories(self.fs)
# Case of a shared job
if self.user != self.job.document.owner:
path = REMOTE_DEPLOYMENT_DIR.get().replace('$USER', self.user.username).replace('$TIME', str(time.time())).replace('$JOBID', str(self.job.id))
# Shared coords or bundles might not have any existing workspaces
if self.fs.exists(self.job.deployment_dir):
self.fs.copy_remote_dir(self.job.deployment_dir, path, owner=self.user)
else:
self._create_dir(path)
else:
path = self.job.deployment_dir
self._create_dir(path)
return path
def _create_dir(self, path, perms=None):
"""
Return the directory in HDFS, creating it if necessary.
"""
try:
statbuf = self.fs.stats(path)
if not statbuf.isDir:
msg = _("Path is not a directory: %s.") % (path,)
LOG.error(msg)
raise Exception(msg)
except IOError, ex:
if ex.errno != errno.ENOENT:
msg = _("Error accessing directory '%s': %s.") % (path, ex)
LOG.exception(msg)
raise IOError(ex.errno, msg)
if not self.fs.exists(path):
self._do_as(self.user.username, self.fs.mkdir, path, perms)
if perms is not None:
self._do_as(self.user.username, self.fs.chmod, path, perms)
return path
def _copy_files(self, deployment_dir, oozie_xml, oozie_properties):
"""
Copy XML and the jar_path files from Java or MR actions to the deployment directory.
This should run as the workflow user.
"""
self._create_file(deployment_dir, self.job.XML_FILE_NAME, oozie_xml)
self._create_file(deployment_dir, 'job.properties', data='\n'.join(['%s=%s' % (key, val) for key, val in oozie_properties.iteritems()]))
# List jar files
files = []
lib_path = self.fs.join(deployment_dir, 'lib')
if hasattr(self.job, 'nodes'):
for node in self.job.nodes:
jar_path = node.data['properties'].get('jar_path')
if jar_path:
if not jar_path.startswith('/'): # If workspace relative path
jar_path = self.fs.join(self.job.deployment_dir, jar_path)
if not jar_path.startswith(lib_path): # If not already in lib
files.append(jar_path)
# Copy the jar files to the workspace lib
if files:
for jar_file in files:
LOG.debug("Updating %s" % jar_file)
jar_lib_path = self.fs.join(lib_path, self.fs.basename(jar_file))
# Refresh if needed
if self.fs.exists(jar_lib_path) and self.fs.exists(jar_file):
stat_src = self.fs.stats(jar_file)
stat_dest = self.fs.stats(jar_lib_path)
if stat_src.fileId != stat_dest.fileId:
self.fs.remove(jar_lib_path, skip_trash=True)
self.fs.copyfile(jar_file, jar_lib_path)
def _do_as(self, username, fn, *args, **kwargs):
prev_user = self.fs.user
try:
self.fs.setuser(username)
return fn(*args, **kwargs)
finally:
self.fs.setuser(prev_user)
def remove_deployment_dir(self):
"""Delete the workflow deployment directory."""
try:
path = self.job.deployment_dir
if self._do_as(self.user.username , self.fs.exists, path):
self._do_as(self.user.username , self.fs.rmtree, path)
except Exception, ex:
LOG.warn("Failed to clean up workflow deployment directory for "
"%s (owner %s). Caused by: %s",
self.job.name, self.user, ex)
def _is_workflow(self):
from oozie.models2 import Workflow
return Workflow.PROPERTY_APP_PATH in self.properties
def _is_coordinator(self):
from oozie.models2 import Coordinator
return Coordinator.PROPERTY_APP_PATH in self.properties
def _create_file(self, deployment_dir, file_name, data, do_as=False):
file_path = self.fs.join(deployment_dir, file_name)
if do_as:
self.fs.do_as_user(self.user, self.fs.create, file_path, overwrite=True, permission=0644, data=smart_str(data))
else:
self.fs.create(file_path, overwrite=True, permission=0644, data=smart_str(data))
LOG.debug("Created/Updated %s" % (file_path,))
def create_directories(fs, directory_list=[]):
# If needed, create the remote home, deployment and data directories
directories = [REMOTE_DEPLOYMENT_DIR.get()] + directory_list
for directory in directories:
if not fs.do_as_user(fs.DEFAULT_USER, fs.exists, directory):
remote_home_dir = Hdfs.join('/user', fs.DEFAULT_USER)
if directory.startswith(remote_home_dir):
# Home is 755
fs.do_as_user(fs.DEFAULT_USER, fs.create_home_dir, remote_home_dir)
# Shared by all the users
fs.do_as_user(fs.DEFAULT_USER, fs.mkdir, directory, 01777)
fs.do_as_user(fs.DEFAULT_USER, fs.chmod, directory, 01777) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
| yoer/hue | desktop/libs/liboozie/src/liboozie/submission2.py | Python | apache-2.0 | 13,378 |
from office365.onedrive.conflictBehavior import ConflictBehavior
from office365.onedrive.fileSystemInfo import FileSystemInfo
from office365.onedrive.uploadSession import UploadSession
from office365.runtime.client_query import ServiceOperationQuery, CreateEntityQuery
from office365.runtime.client_result import ClientResult
from office365.runtime.resource_path import ResourcePath
from office365.onedrive.baseItem import BaseItem
from office365.onedrive.listItem import ListItem
class DriveItem(BaseItem):
"""The driveItem resource represents a file, folder, or other item stored in a drive. All file system objects in
OneDrive and SharePoint are returned as driveItem resources."""
def create_upload_session(self, item):
"""Creates a temporary storage location where the bytes of the file will be saved until the complete file is
uploaded. """
result = ClientResult(UploadSession())
qry = ServiceOperationQuery(self,
"createUploadSession",
None,
{
"item": item
},
None,
result
)
self.context.add_query(qry)
return result
def upload(self, name, content):
"""The simple upload API allows you to provide the contents of a new file or update the contents of an
existing file in a single API call. This method only supports files up to 4MB in size. """
from office365.graphClient import UploadContentQuery
qry = UploadContentQuery(self, name, content)
self.context.add_query(qry)
return qry.returnType
def download(self):
"""Download the contents of the primary stream (file) of a DriveItem. Only driveItems with the file property
can be downloaded. """
from office365.graphClient import DownloadContentQuery
qry = DownloadContentQuery(self)
self.context.add_query(qry)
return qry.returnType
def create_folder(self, name):
"""Create a new folder or DriveItem in a Drive with a specified parent item or path."""
drive_item = DriveItem(self.context, None)
drive_item._parent_collection = self.children
payload = {
"name": name,
"folder": {},
"@microsoft.graph.conflictBehavior": ConflictBehavior.Rename
}
qry = CreateEntityQuery(self.children, payload, drive_item)
self.context.add_query(qry)
return drive_item
def convert(self, format_name):
"""Converts the contents of an item in a specific format"""
from office365.graphClient import DownloadContentQuery
qry = DownloadContentQuery(self, format_name)
self.context.add_query(qry)
return qry.returnType
def copy(self, name, parent_reference=None):
"""Asynchronously creates a copy of an driveItem (including any children), under a new parent item or with a
new name. """
result = ClientResult(None)
qry = ServiceOperationQuery(self,
"copy",
None,
{
"name": name,
"parentReference": parent_reference
},
None,
result
)
self.context.add_query(qry)
return result
def move(self, name, parent_reference=None):
"""To move a DriveItem to a new parent item, your app requests to update the parentReference of the DriveItem
to move. """
from office365.graphClient import ReplaceMethodQuery
result = ClientResult(None)
qry = ReplaceMethodQuery(self,
"move",
None,
{
"name": name,
"parentReference": parent_reference
},
None,
result
)
self.context.add_query(qry)
return result
def search(self, query_text):
"""Search the hierarchy of items for items matching a query. You can search within a folder hierarchy,
a whole drive, or files shared with the current user. """
from office365.graphClient import SearchQuery
result = ClientResult(None)
qry = SearchQuery(self, query_text, result)
self.context.add_query(qry)
return result
@property
def fileSystemInfo(self):
"""File system information on client."""
if self.is_property_available('fileSystemInfo'):
return FileSystemInfo(self.properties['fileSystemInfo'])
else:
return None
@property
def children(self):
"""Collection containing Item objects for the immediate children of Item. Only items representing folders
have children."""
if self.is_property_available('children'):
return self.properties['children']
else:
from office365.onedrive.driveItemCollection import DriveItemCollection
return DriveItemCollection(self.context, ResourcePath("children", self.resourcePath))
@property
def listItem(self):
"""For drives in SharePoint, the associated document library list item."""
if self.is_property_available('listItem'):
return self.properties['listItem']
else:
return ListItem(self.context, ResourcePath("listItem", self.resourcePath))
def set_property(self, name, value, persist_changes=True):
super(DriveItem, self).set_property(name, value, persist_changes)
if name == "id" and self._resource_path.parent.segment == "children":
self._resource_path = ResourcePath(
value,
ResourcePath("items", self._parent_collection.resourcePath.parent.parent))
| vgrem/SharePointOnline-REST-Python-Client | office365/onedrive/driveItem.py | Python | mit | 6,316 |
# coding=utf-8
from .Axes import Axes
from .TimeLine import TimeLine
from .LockView import LockView
from .Colorbar import Colorbar
# from LinkColorbar import LinkColorbar
# from LinkView import LinkView
from .Overlay import Overlay
from .Transform import Transform
from .SwitchBetweenGridAndStackedView import SwitchBetweenGridAndStackedView
color = 'blue'
show = True
tools = (LockView, Colorbar, Axes, TimeLine,
Overlay, Transform,
SwitchBetweenGridAndStackedView)
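# Note: the module-level `color`, `show` and `tools` attributes look like
# registration metadata read by dataArtist's tool loader (which tool classes
# to instantiate for this toolbar, its colour and default visibility). This
# is an inference from the naming; the loader itself lives elsewhere.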
| radjkarl/dataArtist | dataArtist/figures/image/tools/view/__init__.py | Python | gpl-3.0 | 486 |
"""
A fake TwistedBot!
"""
import re
class Logger():
def log(self, *args):
pass
class TestedBot:
bot_messages = []
logger = Logger()
functions = []
messages = {}
__funcs = {}
def __init__(self):
pass
def msg(self, channel, message):
self.bot_messages.append((channel, message))
def register(self, func, name=None):
self.functions.append(func)
if name:
self.__funcs[name] = func.rule
def rules(self):
messages = ["The rules and functions are as follows:"]
for func in self.__funcs:
messages.append(" %s = %s" % (func, self.__funcs[func]))
return messages
def last_message(self):
if len(self.bot_messages):
return self.bot_messages[-1]
def listen(self, usr, channel, message):
for func in self.functions:
if re.match(func.rule, message):
func(self, usr, channel, message)
return self.bot_messages
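# --- Hedged usage sketch (not part of the original module) -----------------
# listen() matches each registered callable's `rule` attribute (a regex)
# against the incoming message. The decorator below that attaches `rule` is
# an illustration only; the real TwistedBot presumably wires this up through
# its own plugin machinery.
if __name__ == '__main__':
    def rule(pattern):
        def wrap(func):
            func.rule = pattern
            return func
        return wrap

    @rule(r'^!ping')
    def ping(bot, usr, channel, message):
        bot.msg(channel, '%s: pong' % usr)

    bot = TestedBot()
    bot.register(ping, name='ping')
    bot.listen('andi', '#test', '!ping')
    assert bot.last_message() == ('#test', 'andi: pong')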
| andimiller/twistedbot | test/fake_tbot.py | Python | mit | 1,008 |
##############################################################################
#
# Copyright (C) 2019 jeo Software (http://www.jeosoft.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Product Create Restriction',
'version': '11.0.0.0.0',
'category': 'Tools',
'summary': "Restrict product creation to enabled users",
'author': "jeo Software",
'website': 'http://github.com/jobiols/module-repo',
'license': 'AGPL-3',
'depends': [
'product'
],
'data': [
'security/security_groups.xml'
],
'installable': True,
'application': False,
}
| jobiols/odoo-addons | product_create_restriction/__manifest__.py | Python | agpl-3.0 | 1,368 |
#!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
import sys
import time
class OverridingStream(object):
def __init__(self, stream=None):
self.stream = stream
self._last_line_length = None
def write(self, line):
# with open("mylog", "a") as f:
# f.write(line + '\n')
if self._last_line_length is None:
self.stream.write("{}".format(line))
else:
self.stream.write("\r{}\r{}".format(" " * self._last_line_length, line))
self.stream.flush()
self._last_line_length = len(line)
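# Note: OverridingStream redraws the current terminal line in place. After the
# first write it emits "\r", blanks the previous line with spaces, then "\r"
# again followed by the new text, so the progress bar updates on a single line
# instead of scrolling.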
class ProgressBar(object):
PRE = '['
POST = ']'
BLOCK = '#'
EMPTY = ' '
MESSAGE = "{current_fraction:.1%} "
def __init__(self,
limit=100.0,
*,
length=70,
current=0.0,
increment=1.0,
message=None,
pre=None,
post=None,
block=None,
empty=None,
stream=None,
maximum=None,
delay=None,
**render_args):
self.limit = limit
self.current = current
self.increment = increment
self.length = length
if message is None:
message = self.MESSAGE
self.message = message
if pre is None:
pre = self.PRE
self.pre = pre
if post is None:
post = self.POST
self.post = post
if block is None:
block = self.BLOCK
self.block = block
if empty is None:
empty = self.EMPTY
self.empty = empty
if stream is None:
stream = OverridingStream(sys.stdout)
self.stream = stream
self.maximum = maximum
self.delay = delay
self.render_args = render_args
def get_line(self,
*,
current=None,
increment=None,
limit=None,
message=None,
**render_args):
if limit is not None:
self.limit = limit
if message is not None:
self.message = message
previous = self.current
if current is None:
if increment is None:
increment = self.increment
current = self.current + increment
self.current = current
if self.maximum and self.current > self.maximum:
self.current = self.maximum
if self.current > self.limit:
self.limit = self.current
increment = self.current - previous
# Guard against a zero limit (mirrors the block_fraction guard further down)
if self.limit == 0.0:
    current_fraction = 0.0
else:
    current_fraction = self.current / self.limit
missing_fraction = 1.0 - current_fraction
format_d = dict(
current_fraction=current_fraction,
missing_fraction=missing_fraction,
current=self.current,
limit=self.limit,
increment=increment,
missing=self.limit - self.current)
format_d.update(self.render_args)
format_d.update(render_args)
message = self.message.format(**format_d)
pre = self.pre.format(**format_d)
post = self.post.format(**format_d)
fixed_length = len(message) + len(pre) + len(post)
variable_length = self.length - fixed_length
if self.limit == 0.0:
block_fraction = 0
else:
block_fraction = self.current / self.limit
block_length = int(round(block_fraction * variable_length))
empty_length = variable_length - block_length
block_num = (block_length + len(self.block) - 1) // len(self.block)
block = (self.block * block_num)[:block_length]
empty_num = (empty_length + len(self.empty) - 1) // len(self.empty)
empty = (self.empty * empty_num)[:empty_length]
line = message + pre + block + empty + post
return line
def render(self, **n_args):
self.stream.write(self.get_line(**n_args))
if self.delay:
time.sleep(self.delay)
def initialize(self):
self.render(current=self.current)
def finalize(self):
self.stream.write("")
def sub_progress_bar(self, intervals, *, next_current=None, next_increment=None, **render_args):
if next_current is None:
if next_increment is None:
next_increment = self.increment
next_current = self.current + next_increment
next_increment = next_current - self.current
sub_current = self.current
sub_increment = next_increment / intervals
sub_limit = self.limit
sub_maximum = next_current
args = self.render_args.copy()
args.update(render_args)
#print()
#print("-" * 80)
#print("-" * 80)
#print("self: current={}, increment={}, limit={}, maximum={}".format(self.current, self.increment, self.limit, self.maximum))
#print("sub: current={}, increment={}, limit={}, maximum={}".format(sub_current, sub_increment, sub_limit, sub_maximum))
#print("sub: intervals={}".format(intervals))
return self.__class__(
length=self.length,
current=sub_current,
increment=sub_increment,
maximum=sub_maximum,
limit=sub_limit,
message=self.message,
pre=self.pre,
post=self.post,
block=self.block,
empty=self.empty,
stream=self.stream,
delay=self.delay,
**args)
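# --- Hedged usage sketch (not part of the original module) -----------------
# sub_progress_bar() renders the next increment of an outer bar as several
# smaller steps against the same stream and limit. Values below are chosen
# arbitrarily for illustration:
#
#   outer = ProgressBar(limit=100.0, increment=20.0)
#   outer.initialize()
#   outer.render()                       # 20%
#   inner = outer.sub_progress_bar(4)    # next 20% split into 4 steps of 5%
#   for _ in range(4):
#       inner.render()                   # 25%, 30%, 35%, 40%
#   outer.render(current=inner.current)  # resync the outer bar at 40%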
if __name__ == "__main__":
import time
progress_bar = ProgressBar(increment=10.0, post=" {current}")
print("inizio...")
progress_bar.initialize()
time.sleep(0.5)
progress_bar.render()
time.sleep(0.5)
progress_bar.render(current=12.5)
time.sleep(0.5)
progress_bar.render(increment=55.6)
time.sleep(0.5)
progress_bar.render()
time.sleep(0.5)
progress_bar.render()
time.sleep(0.5)
progress_bar.render()
time.sleep(0.5)
progress_bar.render()
time.sleep(0.5)
progress_bar.render()
time.sleep(0.5)
progress_bar.finalize()
print("finito")
| simone-campagna/statcode | lib/python/statcode/progressbar.py | Python | apache-2.0 | 6,925 |
import datetime as dt
from unittest import SkipTest, skipIf
import numpy as np
from holoviews import (
Dimension, Curve, Points, Image, Dataset, RGB, Path, Graph, TriMesh,
QuadMesh, NdOverlay, Contours, Spikes, Spread, Area, Rectangles,
Segments, Polygons, Nodes
)
from holoviews.streams import Tap
from holoviews.element.comparison import ComparisonTestCase
from numpy import nan
try:
import datashader as ds
import dask.dataframe as dd
import xarray as xr
from holoviews.core.util import pd
from holoviews.operation.datashader import (
aggregate, regrid, ds_version, stack, directly_connect_edges,
shade, spread, rasterize, datashade, AggregationOperation,
inspect, inspect_points, inspect_polygons
)
except:
raise SkipTest('Datashader not available')
try:
import cudf
import cupy
except:
cudf = None
try:
import spatialpandas
except:
spatialpandas = None
spatialpandas_skip = skipIf(spatialpandas is None, "SpatialPandas not available")
cudf_skip = skipIf(cudf is None, "cuDF not available")
import logging
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
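# Clearing the vdim prefix keeps aggregated value dimensions named plainly
# (e.g. 'Count' rather than a prefixed variant), which is what the expected
# Images constructed throughout these tests assume.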
AggregationOperation.vdim_prefix = ''
class DatashaderAggregateTests(ComparisonTestCase):
"""
Tests for datashader aggregation
"""
def test_aggregate_points(self):
points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])
img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),
vdims=[Dimension('Count', nodata=0)])
self.assertEqual(img, expected)
def test_aggregate_points_count_column(self):
points = Points([(0.2, 0.3, np.NaN), (0.4, 0.7, 22), (0, 0.99,np.NaN)], vdims='z')
img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2, aggregator=ds.count('z'))
expected = Image(([0.25, 0.75], [0.25, 0.75], [[0, 0], [1, 0]]),
vdims=[Dimension('z Count', nodata=0)])
self.assertEqual(img, expected)
@cudf_skip
def test_aggregate_points_cudf(self):
points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)], datatype=['cuDF'])
self.assertIsInstance(points.data, cudf.DataFrame)
img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),
vdims=[Dimension('Count', nodata=0)])
self.assertIsInstance(img.data.Count.data, cupy.ndarray)
self.assertEqual(img, expected)
def test_aggregate_zero_range_points(self):
p = Points([(0, 0), (1, 1)])
agg = rasterize(p, x_range=(0, 0), y_range=(0, 1), expand=False, dynamic=False,
width=2, height=2)
img = Image(([], [0.25, 0.75], np.zeros((2, 0))), bounds=(0, 0, 0, 1),
xdensity=1, vdims=[Dimension('Count', nodata=0)])
self.assertEqual(agg, img)
def test_aggregate_points_target(self):
points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),
vdims=[Dimension('Count', nodata=0)])
img = aggregate(points, dynamic=False, target=expected)
self.assertEqual(img, expected)
def test_aggregate_points_sampling(self):
points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),
vdims=[Dimension('Count', nodata=0)])
img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
x_sampling=0.5, y_sampling=0.5)
self.assertEqual(img, expected)
def test_aggregate_points_categorical(self):
points = Points([(0.2, 0.3, 'A'), (0.4, 0.7, 'B'), (0, 0.99, 'C')], vdims='z')
img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2, aggregator=ds.count_cat('z'))
xs, ys = [0.25, 0.75], [0.25, 0.75]
expected = NdOverlay({'A': Image((xs, ys, [[1, 0], [0, 0]]), vdims=Dimension('z Count', nodata=0)),
'B': Image((xs, ys, [[0, 0], [1, 0]]), vdims=Dimension('z Count', nodata=0)),
'C': Image((xs, ys, [[0, 0], [1, 0]]), vdims=Dimension('z Count', nodata=0))},
kdims=['z'])
self.assertEqual(img, expected)
def test_aggregate_points_categorical_zero_range(self):
points = Points([(0.2, 0.3, 'A'), (0.4, 0.7, 'B'), (0, 0.99, 'C')], vdims='z')
img = aggregate(points, dynamic=False, x_range=(0, 0), y_range=(0, 1),
aggregator=ds.count_cat('z'), height=2)
xs, ys = [], [0.25, 0.75]
params = dict(bounds=(0, 0, 0, 1), xdensity=1)
expected = NdOverlay({'A': Image((xs, ys, np.zeros((2, 0))), vdims=Dimension('z Count', nodata=0), **params),
'B': Image((xs, ys, np.zeros((2, 0))), vdims=Dimension('z Count', nodata=0), **params),
'C': Image((xs, ys, np.zeros((2, 0))), vdims=Dimension('z Count', nodata=0), **params)},
kdims=['z'])
self.assertEqual(img, expected)
def test_aggregate_curve(self):
curve = Curve([(0.2, 0.3), (0.4, 0.7), (0.8, 0.99)])
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [1, 1]]),
vdims=[Dimension('Count', nodata=0)])
img = aggregate(curve, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
self.assertEqual(img, expected)
def test_aggregate_curve_datetimes(self):
dates = pd.date_range(start="2016-01-01", end="2016-01-03", freq='1D')
curve = Curve((dates, [1, 2, 3]))
img = aggregate(curve, width=2, height=2, dynamic=False)
bounds = (np.datetime64('2016-01-01T00:00:00.000000'), 1.0,
np.datetime64('2016-01-03T00:00:00.000000'), 3.0)
dates = [np.datetime64('2016-01-01T12:00:00.000000000'),
np.datetime64('2016-01-02T12:00:00.000000000')]
expected = Image((dates, [1.5, 2.5], [[1, 0], [0, 2]]),
datatype=['xarray'], bounds=bounds, vdims=Dimension('Count', nodata=0))
self.assertEqual(img, expected)
def test_aggregate_curve_datetimes_dask(self):
df = pd.DataFrame(
data=np.arange(1000), columns=['a'],
index=pd.date_range('2019-01-01', freq='1T', periods=1000),
)
ddf = dd.from_pandas(df, npartitions=4)
curve = Curve(ddf, kdims=['index'], vdims=['a'])
img = aggregate(curve, width=2, height=3, dynamic=False)
bounds = (np.datetime64('2019-01-01T00:00:00.000000'), 0.0,
np.datetime64('2019-01-01T16:39:00.000000'), 999.0)
dates = [np.datetime64('2019-01-01T04:09:45.000000000'),
np.datetime64('2019-01-01T12:29:15.000000000')]
expected = Image((dates, [166.5, 499.5, 832.5], [[332, 0], [167, 166], [0, 334]]),
kdims=['index', 'a'], vdims=Dimension('Count', nodata=0),
datatype=['xarray'], bounds=bounds)
self.assertEqual(img, expected)
def test_aggregate_curve_datetimes_microsecond_timebase(self):
dates = pd.date_range(start="2016-01-01", end="2016-01-03", freq='1D')
xstart = np.datetime64('2015-12-31T23:59:59.723518000', 'us')
xend = np.datetime64('2016-01-03T00:00:00.276482000', 'us')
curve = Curve((dates, [1, 2, 3]))
img = aggregate(curve, width=2, height=2, x_range=(xstart, xend), dynamic=False)
bounds = (np.datetime64('2015-12-31T23:59:59.723518'), 1.0,
np.datetime64('2016-01-03T00:00:00.276482'), 3.0)
dates = [np.datetime64('2016-01-01T11:59:59.861759000',),
np.datetime64('2016-01-02T12:00:00.138241000')]
expected = Image((dates, [1.5, 2.5], [[1, 0], [0, 2]]),
datatype=['xarray'], bounds=bounds, vdims=Dimension('Count', nodata=0))
self.assertEqual(img, expected)
def test_aggregate_ndoverlay_count_cat_datetimes_microsecond_timebase(self):
dates = pd.date_range(start="2016-01-01", end="2016-01-03", freq='1D')
xstart = np.datetime64('2015-12-31T23:59:59.723518000', 'us')
xend = np.datetime64('2016-01-03T00:00:00.276482000', 'us')
curve = Curve((dates, [1, 2, 3]))
curve2 = Curve((dates, [3, 2, 1]))
ndoverlay = NdOverlay({0: curve, 1: curve2}, 'Cat')
imgs = aggregate(ndoverlay, aggregator=ds.count_cat('Cat'), width=2, height=2,
x_range=(xstart, xend), dynamic=False)
bounds = (np.datetime64('2015-12-31T23:59:59.723518'), 1.0,
np.datetime64('2016-01-03T00:00:00.276482'), 3.0)
dates = [np.datetime64('2016-01-01T11:59:59.861759000',),
np.datetime64('2016-01-02T12:00:00.138241000')]
expected = Image((dates, [1.5, 2.5], [[1, 0], [0, 2]]),
datatype=['xarray'], bounds=bounds, vdims=Dimension('Count', nodata=0))
expected2 = Image((dates, [1.5, 2.5], [[0, 1], [1, 1]]),
datatype=['xarray'], bounds=bounds, vdims=Dimension('Count', nodata=0))
self.assertEqual(imgs[0], expected)
self.assertEqual(imgs[1], expected2)
def test_aggregate_dt_xaxis_constant_yaxis(self):
df = pd.DataFrame({'y': np.ones(100)}, index=pd.date_range('1980-01-01', periods=100, freq='1T'))
img = rasterize(Curve(df), dynamic=False, width=3)
xs = np.array(['1980-01-01T00:16:30.000000', '1980-01-01T00:49:30.000000',
'1980-01-01T01:22:30.000000'], dtype='datetime64[us]')
ys = np.array([])
bounds = (np.datetime64('1980-01-01T00:00:00.000000'), 1.0,
np.datetime64('1980-01-01T01:39:00.000000'), 1.0)
expected = Image((xs, ys, np.empty((0, 3))), ['index', 'y'],
vdims=Dimension('Count', nodata=0), xdensity=1,
ydensity=1, bounds=bounds)
self.assertEqual(img, expected)
def test_aggregate_ndoverlay(self):
ds = Dataset([(0.2, 0.3, 0), (0.4, 0.7, 1), (0, 0.99, 2)], kdims=['x', 'y', 'z'])
ndoverlay = ds.to(Points, ['x', 'y'], [], 'z').overlay()
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),
vdims=[Dimension('Count', nodata=0)])
img = aggregate(ndoverlay, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
self.assertEqual(img, expected)
def test_aggregate_path(self):
path = Path([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]])
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 1]]),
vdims=[Dimension('Count', nodata=0)])
img = aggregate(path, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
self.assertEqual(img, expected)
def test_aggregate_contours_with_vdim(self):
contours = Contours([[(0.2, 0.3, 1), (0.4, 0.7, 1)], [(0.4, 0.7, 2), (0.8, 0.99, 2)]], vdims='z')
img = rasterize(contours, dynamic=False)
self.assertEqual(img.vdims, ['z'])
def test_aggregate_contours_without_vdim(self):
contours = Contours([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]])
img = rasterize(contours, dynamic=False)
self.assertEqual(img.vdims, [Dimension('Any', nodata=0)])
def test_aggregate_dframe_nan_path(self):
path = Path([Path([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]]).dframe()])
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 1]]),
vdims=[Dimension('Count', nodata=0)])
img = aggregate(path, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
self.assertEqual(img, expected)
def test_spikes_aggregate_count(self):
spikes = Spikes([1, 2, 3])
agg = rasterize(spikes, width=5, dynamic=False, expand=False)
expected = Image(np.array([[1, 0, 1, 0, 1]]), vdims=Dimension('Count', nodata=0),
xdensity=2.5, ydensity=1, bounds=(1, 0, 3, 0.5))
self.assertEqual(agg, expected)
def test_spikes_aggregate_count_dask(self):
spikes = Spikes([1, 2, 3], datatype=['dask'])
agg = rasterize(spikes, width=5, dynamic=False, expand=False)
expected = Image(np.array([[1, 0, 1, 0, 1]]), vdims=Dimension('Count', nodata=0),
xdensity=2.5, ydensity=1, bounds=(1, 0, 3, 0.5))
self.assertEqual(agg, expected)
def test_spikes_aggregate_dt_count(self):
spikes = Spikes([dt.datetime(2016, 1, 1), dt.datetime(2016, 1, 2), dt.datetime(2016, 1, 3)])
agg = rasterize(spikes, width=5, dynamic=False, expand=False)
bounds = (np.datetime64('2016-01-01T00:00:00.000000'), 0,
np.datetime64('2016-01-03T00:00:00.000000'), 0.5)
expected = Image(np.array([[1, 0, 1, 0, 1]]), vdims=Dimension('Count', nodata=0), bounds=bounds)
self.assertEqual(agg, expected)
def test_spikes_aggregate_dt_count_dask(self):
spikes = Spikes([dt.datetime(2016, 1, 1), dt.datetime(2016, 1, 2), dt.datetime(2016, 1, 3)],
datatype=['dask'])
agg = rasterize(spikes, width=5, dynamic=False, expand=False)
bounds = (np.datetime64('2016-01-01T00:00:00.000000'), 0,
np.datetime64('2016-01-03T00:00:00.000000'), 0.5)
expected = Image(np.array([[1, 0, 1, 0, 1]]), vdims=Dimension('Count', nodata=0), bounds=bounds)
self.assertEqual(agg, expected)
def test_spikes_aggregate_spike_length(self):
spikes = Spikes([1, 2, 3])
agg = rasterize(spikes, width=5, dynamic=False, expand=False, spike_length=7)
expected = Image(np.array([[1, 0, 1, 0, 1]]), vdims=Dimension('Count', nodata=0),
xdensity=2.5, ydensity=1, bounds=(1, 0, 3, 7.0))
self.assertEqual(agg, expected)
def test_spikes_aggregate_with_height_count(self):
spikes = Spikes([(1, 0.2), (2, 0.8), (3, 0.4)], vdims='y')
agg = rasterize(spikes, width=5, height=5, y_range=(0, 1), dynamic=False)
xs = [1.2, 1.6, 2.0, 2.4, 2.8]
ys = [0.1, 0.3, 0.5, 0.7, 0.9]
arr = np.array([
[1, 0, 1, 0, 1],
[1, 0, 1, 0, 1],
[0, 0, 1, 0, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_spikes_aggregate_with_height_count_override(self):
spikes = Spikes([(1, 0.2), (2, 0.8), (3, 0.4)], vdims='y')
agg = rasterize(spikes, width=5, height=5, y_range=(0, 1),
spike_length=0.3, dynamic=False)
xs = [1.2, 1.6, 2.0, 2.4, 2.8]
ys = [0.1, 0.3, 0.5, 0.7, 0.9]
arr = np.array([[1, 0, 1, 0, 1],
[1, 0, 1, 0, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_rasterize_regrid_and_spikes_overlay(self):
img = Image(([0.5, 1.5], [0.5, 1.5], [[0, 1], [2, 3]]))
spikes = Spikes([(0.5, 0.2), (1.5, 0.8), ], vdims='y')
expected_regrid = Image(([0.25, 0.75, 1.25, 1.75],
[0.25, 0.75, 1.25, 1.75],
[[0, 0, 1, 1],
[0, 0, 1, 1],
[2, 2, 3, 3],
[2, 2, 3, 3]]))
spikes_arr = np.array([[0, 1, 0, 1],
[0, 1, 0, 1],
[0, 0, 0, 0],
[0, 0, 0, 0]])
expected_spikes = Image(([0.25, 0.75, 1.25, 1.75],
[0.25, 0.75, 1.25, 1.75], spikes_arr), vdims=Dimension('Count', nodata=0))
overlay = img * spikes
agg = rasterize(overlay, width=4, height=4, x_range=(0, 2), y_range=(0, 2),
spike_length=0.5, upsample=True, dynamic=False)
self.assertEqual(agg.Image.I, expected_regrid)
self.assertEqual(agg.Spikes.I, expected_spikes)
def test_spikes_aggregate_with_height_count_dask(self):
spikes = Spikes([(1, 0.2), (2, 0.8), (3, 0.4)], vdims='y', datatype=['dask'])
agg = rasterize(spikes, width=5, height=5, y_range=(0, 1), dynamic=False)
xs = [1.2, 1.6, 2.0, 2.4, 2.8]
ys = [0.1, 0.3, 0.5, 0.7, 0.9]
arr = np.array([
[1, 0, 1, 0, 1],
[1, 0, 1, 0, 1],
[0, 0, 1, 0, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_spikes_aggregate_with_negative_height_count(self):
spikes = Spikes([(1, -0.2), (2, -0.8), (3, -0.4)], vdims='y', datatype=['dask'])
agg = rasterize(spikes, width=5, height=5, y_range=(-1, 0), dynamic=False)
xs = [1.2, 1.6, 2.0, 2.4, 2.8]
ys = [-0.9, -0.7, -0.5, -0.3, -0.1]
arr = np.array([
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 1],
[1, 0, 1, 0, 1]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_spikes_aggregate_with_positive_and_negative_height_count(self):
spikes = Spikes([(1, -0.2), (2, 0.8), (3, -0.4)], vdims='y', datatype=['dask'])
agg = rasterize(spikes, width=5, height=5, y_range=(-1, 1), dynamic=False)
xs = [1.2, 1.6, 2.0, 2.4, 2.8]
ys = [-0.8, -0.4, 0.0, 0.4, 0.8]
arr = np.array([
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 1, 0, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_rectangles_aggregate_count(self):
rects = Rectangles([(0, 0, 1, 2), (1, 1, 3, 2)])
agg = rasterize(rects, width=4, height=4, dynamic=False)
xs = [0.375, 1.125, 1.875, 2.625]
ys = [0.25, 0.75, 1.25, 1.75]
arr = np.array([
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 2, 1, 1],
[0, 0, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_rectangles_aggregate_count_cat(self):
rects = Rectangles([(0, 0, 1, 2, 'A'), (1, 1, 3, 2, 'B')], vdims=['cat'])
agg = rasterize(rects, width=4, height=4, aggregator=ds.count_cat('cat'),
dynamic=False)
xs = [0.375, 1.125, 1.875, 2.625]
ys = [0.25, 0.75, 1.25, 1.75]
arr1 = np.array([
[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]
])
arr2 = np.array([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0]
])
expected1 = Image((xs, ys, arr1), vdims=Dimension('cat Count', nodata=0))
expected2 = Image((xs, ys, arr2), vdims=Dimension('cat Count', nodata=0))
expected = NdOverlay({'A': expected1, 'B': expected2}, kdims=['cat'])
self.assertEqual(agg, expected)
def test_rectangles_aggregate_sum(self):
rects = Rectangles([(0, 0, 1, 2, 0.5), (1, 1, 3, 2, 1.5)], vdims=['value'])
agg = rasterize(rects, width=4, height=4, aggregator='sum', dynamic=False)
xs = [0.375, 1.125, 1.875, 2.625]
ys = [0.25, 0.75, 1.25, 1.75]
nan = np.nan
arr = np.array([
[0.5, 0.5, nan, nan],
[0.5, 0.5, nan, nan],
[0.5, 2. , 1.5, 1.5],
[nan, nan, nan, nan]
])
expected = Image((xs, ys, arr), vdims='value')
self.assertEqual(agg, expected)
def test_rectangles_aggregate_dt_count(self):
rects = Rectangles([
(0, dt.datetime(2016, 1, 2), 4, dt.datetime(2016, 1, 3)),
(1, dt.datetime(2016, 1, 1), 2, dt.datetime(2016, 1, 5))
])
agg = rasterize(rects, width=4, height=4, dynamic=False)
xs = [0.5, 1.5, 2.5, 3.5]
ys = [
np.datetime64('2016-01-01T12:00:00'), np.datetime64('2016-01-02T12:00:00'),
np.datetime64('2016-01-03T12:00:00'), np.datetime64('2016-01-04T12:00:00')
]
arr = np.array([
[0, 1, 1, 0],
[1, 2, 2, 1],
[0, 1, 1, 0],
[0, 0, 0, 0]
])
bounds = (0.0, np.datetime64('2016-01-01T00:00:00'),
4.0, np.datetime64('2016-01-05T00:00:00'))
expected = Image((xs, ys, arr), bounds=bounds, vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_segments_aggregate_count(self):
segments = Segments([(0, 1, 4, 1), (1, 0, 1, 4)])
agg = rasterize(segments, width=4, height=4, dynamic=False)
xs = [0.5, 1.5, 2.5, 3.5]
ys = [0.5, 1.5, 2.5, 3.5]
arr = np.array([
[0, 1, 0, 0],
[1, 2, 1, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_segments_aggregate_sum(self, instance=False):
segments = Segments([(0, 1, 4, 1, 2), (1, 0, 1, 4, 4)], vdims=['value'])
if instance:
agg = rasterize.instance(
width=10, height=10, dynamic=False, aggregator='sum'
)(segments, width=4, height=4)
else:
agg = rasterize(
segments, width=4, height=4, dynamic=False, aggregator='sum'
)
xs = [0.5, 1.5, 2.5, 3.5]
ys = [0.5, 1.5, 2.5, 3.5]
na = np.nan
arr = np.array([
[na, 4, na, na],
[2 , 6, 2 , 2 ],
[na, 4, na, na],
[na, 4, na, na]
])
expected = Image((xs, ys, arr), vdims='value')
self.assertEqual(agg, expected)
def test_segments_aggregate_sum_instance(self):
self.test_segments_aggregate_sum(instance=True)
def test_segments_aggregate_dt_count(self):
segments = Segments([
(0, dt.datetime(2016, 1, 2), 4, dt.datetime(2016, 1, 2)),
(1, dt.datetime(2016, 1, 1), 1, dt.datetime(2016, 1, 5))
])
agg = rasterize(segments, width=4, height=4, dynamic=False)
xs = [0.5, 1.5, 2.5, 3.5]
ys = [
np.datetime64('2016-01-01T12:00:00'), np.datetime64('2016-01-02T12:00:00'),
np.datetime64('2016-01-03T12:00:00'), np.datetime64('2016-01-04T12:00:00')
]
arr = np.array([
[0, 1, 0, 0],
[1, 2, 1, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]
])
bounds = (0.0, np.datetime64('2016-01-01T00:00:00'),
4.0, np.datetime64('2016-01-05T00:00:00'))
expected = Image((xs, ys, arr), bounds=bounds, vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_area_aggregate_simple_count(self):
area = Area([1, 2, 1])
agg = rasterize(area, width=4, height=4, y_range=(0, 3), dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.375, 1.125, 1.875, 2.625]
arr = np.array([
[1, 1, 1, 1],
[1, 1, 1, 1],
[0, 1, 1, 0],
[0, 0, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_area_aggregate_negative_count(self):
area = Area([-1, -2, -3])
agg = rasterize(area, width=4, height=4, y_range=(-3, 0), dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [-2.625, -1.875, -1.125, -0.375]
arr = np.array([
[0, 0, 0, 1],
[0, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_area_aggregate_crossover_count(self):
area = Area([-1, 2, 3])
agg = rasterize(area, width=4, height=4, y_range=(-3, 3), dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [-2.25, -0.75, 0.75, 2.25]
arr = np.array([
[0, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 1, 1],
[0, 0, 1, 1]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_spread_aggregate_symmetric_count(self):
spread = Spread([(0, 1, 0.8), (1, 2, 0.3), (2, 3, 0.8)])
agg = rasterize(spread, width=4, height=4, dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.65, 1.55, 2.45, 3.35]
arr = np.array([
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 0, 1]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_spread_aggregate_asymmetric_count(self):
spread = Spread([(0, 1, 0.4, 0.8), (1, 2, 0.8, 0.4), (2, 3, 0.5, 1)],
vdims=['y', 'pos', 'neg'])
agg = rasterize(spread, width=4, height=4, dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.6125, 1.4375, 2.2625, 3.0875]
arr = np.array([
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 1, 0],
[0, 0, 1, 1]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
def test_rgb_regrid_packed(self):
coords = {'x': [1, 2], 'y': [1, 2], 'band': [0, 1, 2]}
arr = np.array([
[[255, 10],
[ 0, 30]],
[[ 1, 0],
[ 0, 0]],
[[127, 0],
[ 0, 68]],
]).T
da = xr.DataArray(data=arr, dims=('x', 'y', 'band'), coords=coords)
im = RGB(da, ['x', 'y'])
agg = rasterize(im, width=3, height=3, dynamic=False, upsample=True)
xs = [0.8333333, 1.5, 2.166666]
ys = [0.8333333, 1.5, 2.166666]
arr = np.array([
[[255, 255, 10],
[255, 255, 10],
[ 0, 0, 30]],
[[ 1, 1, 0],
[ 1, 1, 0],
[ 0, 0, 0]],
[[127, 127, 0],
[127, 127, 0],
[ 0, 0, 68]],
]).transpose((1, 2, 0))
expected = RGB((xs, ys, arr))
self.assertEqual(agg, expected)
@spatialpandas_skip
def test_line_rasterize(self):
path = Path([[(0, 0), (1, 1), (2, 0)], [(0, 0), (0, 1)]], datatype=['spatialpandas'])
agg = rasterize(path, width=4, height=4, dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.125, 0.375, 0.625, 0.875]
arr = np.array([
[2, 0, 0, 1],
[1, 1, 0, 1],
[1, 1, 1, 0],
[1, 0, 1, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
@spatialpandas_skip
def test_multi_line_rasterize(self):
path = Path([{'x': [0, 1, 2, np.nan, 0, 0], 'y': [0, 1, 0, np.nan, 0, 1]}],
datatype=['spatialpandas'])
agg = rasterize(path, width=4, height=4, dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.125, 0.375, 0.625, 0.875]
arr = np.array([
[2, 0, 0, 1],
[1, 1, 0, 1],
[1, 1, 1, 0],
[1, 0, 1, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
@spatialpandas_skip
def test_ring_rasterize(self):
path = Path([{'x': [0, 1, 2], 'y': [0, 1, 0], 'geom_type': 'Ring'}], datatype=['spatialpandas'])
agg = rasterize(path, width=4, height=4, dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.125, 0.375, 0.625, 0.875]
arr = np.array([
[1, 1, 1, 1],
[0, 1, 0, 1],
[0, 1, 1, 0],
[0, 0, 1, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
@spatialpandas_skip
def test_polygon_rasterize(self):
poly = Polygons([
{'x': [0, 1, 2], 'y': [0, 1, 0],
'holes': [[[(1.6, 0.2), (1, 0.8), (0.4, 0.2)]]]}
])
agg = rasterize(poly, width=6, height=6, dynamic=False)
xs = [0.166667, 0.5, 0.833333, 1.166667, 1.5, 1.833333]
ys = [0.083333, 0.25, 0.416667, 0.583333, 0.75, 0.916667]
arr = np.array([
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
@spatialpandas_skip
def test_polygon_rasterize_mean_agg(self):
poly = Polygons([
{'x': [0, 1, 2], 'y': [0, 1, 0], 'z': 2.4},
{'x': [0, 0, 1], 'y': [0, 1, 1], 'z': 3.6}
], vdims='z')
agg = rasterize(poly, width=4, height=4, dynamic=False, aggregator='mean')
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.125, 0.375, 0.625, 0.875]
arr = np.array([
[ 2.4, 2.4, 2.4, 2.4],
[ 3.6, 2.4, 2.4, np.nan],
[ 3.6, 2.4, 2.4, np.nan],
[ 3.6, 3.6, np.nan, np.nan]])
expected = Image((xs, ys, arr), vdims='z')
self.assertEqual(agg, expected)
@spatialpandas_skip
def test_multi_poly_rasterize(self):
poly = Polygons([{'x': [0, 1, 2, np.nan, 0, 0, 1],
'y': [0, 1, 0, np.nan, 0, 1, 1]}],
datatype=['spatialpandas'])
agg = rasterize(poly, width=4, height=4, dynamic=False)
xs = [0.25, 0.75, 1.25, 1.75]
ys = [0.125, 0.375, 0.625, 0.875]
arr = np.array([
[1, 1, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 0],
[1, 1, 0, 0]
])
expected = Image((xs, ys, arr), vdims=Dimension('Count', nodata=0))
self.assertEqual(agg, expected)
class DatashaderCatAggregateTests(ComparisonTestCase):
def setUp(self):
if ds_version < '0.11.0':
raise SkipTest('Categorical aggregation requires datashader>=0.11.0')
def test_aggregate_points_categorical(self):
points = Points([(0.2, 0.3, 'A'), (0.4, 0.7, 'B'), (0, 0.99, 'C')], vdims='z')
img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2, aggregator=ds.by('z', ds.count()))
xs, ys = [0.25, 0.75], [0.25, 0.75]
expected = NdOverlay({'A': Image((xs, ys, [[1, 0], [0, 0]]), vdims=Dimension('z Count', nodata=0)),
'B': Image((xs, ys, [[0, 0], [1, 0]]), vdims=Dimension('z Count', nodata=0)),
'C': Image((xs, ys, [[0, 0], [1, 0]]), vdims=Dimension('z Count', nodata=0))},
kdims=['z'])
self.assertEqual(img, expected)
def test_aggregate_points_categorical_mean(self):
points = Points([(0.2, 0.3, 'A', 0.1), (0.4, 0.7, 'B', 0.2), (0, 0.99, 'C', 0.3)], vdims=['cat', 'z'])
img = aggregate(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2, aggregator=ds.by('cat', ds.mean('z')))
xs, ys = [0.25, 0.75], [0.25, 0.75]
expected = NdOverlay({'A': Image((xs, ys, [[0.1, nan], [nan, nan]]), vdims='z'),
'B': Image((xs, ys, [[nan, nan], [0.2, nan]]), vdims='z'),
'C': Image((xs, ys, [[nan, nan], [0.3, nan]]), vdims='z')},
kdims=['cat'])
self.assertEqual(img, expected)
class DatashaderShadeTests(ComparisonTestCase):
def test_shade_categorical_images_xarray(self):
xs, ys = [0.25, 0.75], [0.25, 0.75]
data = NdOverlay({'A': Image((xs, ys, np.array([[1, 0], [0, 0]], dtype='u4')),
datatype=['xarray'], vdims=Dimension('z Count', nodata=0)),
'B': Image((xs, ys, np.array([[0, 0], [1, 0]], dtype='u4')),
datatype=['xarray'], vdims=Dimension('z Count', nodata=0)),
'C': Image((xs, ys, np.array([[0, 0], [1, 0]], dtype='u4')),
datatype=['xarray'], vdims=Dimension('z Count', nodata=0))},
kdims=['z'])
shaded = shade(data)
r = [[228, 120], [66, 120]]
g = [[26, 109], [150, 109]]
b = [[28, 95], [129, 95]]
a = [[40, 0], [255, 0]]
expected = RGB((xs, ys, r, g, b, a), datatype=['grid'],
vdims=RGB.vdims+[Dimension('A', range=(0, 1))])
self.assertEqual(shaded, expected)
def test_shade_categorical_images_grid(self):
xs, ys = [0.25, 0.75], [0.25, 0.75]
data = NdOverlay({'A': Image((xs, ys, np.array([[1, 0], [0, 0]], dtype='u4')),
datatype=['grid'], vdims=Dimension('z Count', nodata=0)),
'B': Image((xs, ys, np.array([[0, 0], [1, 0]], dtype='u4')),
datatype=['grid'], vdims=Dimension('z Count', nodata=0)),
'C': Image((xs, ys, np.array([[0, 0], [1, 0]], dtype='u4')),
datatype=['grid'], vdims=Dimension('z Count', nodata=0))},
kdims=['z'])
shaded = shade(data)
r = [[228, 120], [66, 120]]
g = [[26, 109], [150, 109]]
b = [[28, 95], [129, 95]]
a = [[40, 0], [255, 0]]
expected = RGB((xs, ys, r, g, b, a), datatype=['grid'],
vdims=RGB.vdims+[Dimension('A', range=(0, 1))])
self.assertEqual(shaded, expected)
def test_shade_dt_xaxis_constant_yaxis(self):
df = pd.DataFrame({'y': np.ones(100)}, index=pd.date_range('1980-01-01', periods=100, freq='1T'))
rgb = shade(rasterize(Curve(df), dynamic=False, width=3))
xs = np.array(['1980-01-01T00:16:30.000000', '1980-01-01T00:49:30.000000',
'1980-01-01T01:22:30.000000'], dtype='datetime64[us]')
ys = np.array([])
bounds = (np.datetime64('1980-01-01T00:00:00.000000'), 1.0,
np.datetime64('1980-01-01T01:39:00.000000'), 1.0)
expected = RGB((xs, ys, np.empty((0, 3, 4))), ['index', 'y'],
xdensity=1, ydensity=1, bounds=bounds)
self.assertEqual(rgb, expected)
class DatashaderRegridTests(ComparisonTestCase):
"""
Tests for datashader regridding
"""
def setUp(self):
if ds_version <= '0.5.0':
raise SkipTest('Regridding operations require datashader>=0.6.0')
def test_regrid_mean(self):
img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T))
regridded = regrid(img, width=2, height=2, dynamic=False)
expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 22]]))
self.assertEqual(regridded, expected)
def test_regrid_mean_xarray_transposed(self):
img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T),
datatype=['xarray'])
img.data = img.data.transpose()
regridded = regrid(img, width=2, height=2, dynamic=False)
expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 22]]))
self.assertEqual(regridded, expected)
def test_regrid_rgb_mean(self):
arr = (np.arange(10) * np.arange(5)[np.newaxis].T).astype('f')
rgb = RGB((range(10), range(5), arr, arr*2, arr*2))
regridded = regrid(rgb, width=2, height=2, dynamic=False)
new_arr = np.array([[1.6, 5.6], [6.4, 22.4]])
expected = RGB(([2., 7.], [0.75, 3.25], new_arr, new_arr*2, new_arr*2), datatype=['xarray'])
self.assertEqual(regridded, expected)
def test_regrid_max(self):
img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T))
regridded = regrid(img, aggregator='max', width=2, height=2, dynamic=False)
expected = Image(([2., 7.], [0.75, 3.25], [[8, 18], [16, 36]]))
self.assertEqual(regridded, expected)
def test_regrid_upsampling(self):
img = Image(([0.5, 1.5], [0.5, 1.5], [[0, 1], [2, 3]]))
regridded = regrid(img, width=4, height=4, upsample=True, dynamic=False)
expected = Image(([0.25, 0.75, 1.25, 1.75], [0.25, 0.75, 1.25, 1.75],
[[0, 0, 1, 1],
[0, 0, 1, 1],
[2, 2, 3, 3],
[2, 2, 3, 3]]))
self.assertEqual(regridded, expected)
def test_regrid_upsampling_linear(self):
### This test causes a numba error using 0.35.0 - temporarily disabled ###
return
img = Image(([0.5, 1.5], [0.5, 1.5], [[0, 1], [2, 3]]))
regridded = regrid(img, width=4, height=4, upsample=True, interpolation='linear', dynamic=False)
expected = Image(([0.25, 0.75, 1.25, 1.75], [0.25, 0.75, 1.25, 1.75],
[[0, 0, 0, 1],
[0, 1, 1, 1],
[1, 1, 2, 2],
[2, 2, 2, 3]]))
self.assertEqual(regridded, expected)
def test_regrid_disabled_upsampling(self):
img = Image(([0.5, 1.5], [0.5, 1.5], [[0, 1], [2, 3]]))
regridded = regrid(img, width=3, height=3, dynamic=False, upsample=False)
self.assertEqual(regridded, img)
def test_regrid_disabled_expand(self):
img = Image(([0.5, 1.5], [0.5, 1.5], [[0., 1.], [2., 3.]]))
regridded = regrid(img, width=2, height=2, x_range=(-2, 4), y_range=(-2, 4), expand=False,
dynamic=False)
self.assertEqual(regridded, img)
def test_regrid_zero_range(self):
ls = np.linspace(0, 10, 200)
xx, yy = np.meshgrid(ls, ls)
img = Image(np.sin(xx)*np.cos(yy), bounds=(0, 0, 1, 1))
regridded = regrid(img, x_range=(-1, -0.5), y_range=(-1, -0.5), dynamic=False)
expected = Image(np.zeros((0, 0)), bounds=(0, 0, 0, 0), xdensity=1, ydensity=1)
self.assertEqual(regridded, expected)
class DatashaderRasterizeTests(ComparisonTestCase):
"""
Tests for datashader rasterization
"""
def setUp(self):
if ds_version <= '0.6.4':
raise SkipTest('Rasterizing operations require datashader>=0.7.0')
self.simplexes = [(0, 1, 2), (3, 2, 1)]
self.vertices = [(0., 0.), (0., 1.), (1., 0), (1, 1)]
self.simplexes_vdim = [(0, 1, 2, 0.5), (3, 2, 1, 1.5)]
self.vertices_vdim = [(0., 0., 1), (0., 1., 2), (1., 0, 3), (1, 1, 4)]
def test_rasterize_trimesh_no_vdims(self):
trimesh = TriMesh((self.simplexes, self.vertices))
img = rasterize(trimesh, width=3, height=3, dynamic=False)
image = Image(np.array([[True, True, True], [True, True, True], [True, True, True]]),
bounds=(0, 0, 1, 1), vdims=Dimension('Any', nodata=0))
self.assertEqual(img, image)
def test_rasterize_trimesh_no_vdims_zero_range(self):
trimesh = TriMesh((self.simplexes, self.vertices))
img = rasterize(trimesh, height=2, x_range=(0, 0), dynamic=False)
image = Image(([], [0.25, 0.75], np.zeros((2, 0))),
bounds=(0, 0, 0, 1), xdensity=1, vdims=Dimension('Any', nodata=0))
self.assertEqual(img, image)
def test_rasterize_trimesh_with_vdims_as_wireframe(self):
trimesh = TriMesh((self.simplexes_vdim, self.vertices), vdims=['z'])
img = rasterize(trimesh, width=3, height=3, aggregator='any', interpolation=None, dynamic=False)
array = np.array([
[True, True, True],
[True, True, True],
[True, True, True]
])
image = Image(array, bounds=(0, 0, 1, 1), vdims=Dimension('Any', nodata=0))
self.assertEqual(img, image)
def test_rasterize_trimesh(self):
trimesh = TriMesh((self.simplexes_vdim, self.vertices), vdims=['z'])
img = rasterize(trimesh, width=3, height=3, dynamic=False)
array = np.array([
[ 1.5, 1.5, np.NaN],
[ 0.5, 1.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_pandas_trimesh_implicit_nodes(self):
simplex_df = pd.DataFrame(self.simplexes, columns=['v0', 'v1', 'v2'])
vertex_df = pd.DataFrame(self.vertices_vdim, columns=['x', 'y', 'z'])
trimesh = TriMesh((simplex_df, vertex_df))
img = rasterize(trimesh, width=3, height=3, dynamic=False)
array = np.array([
[ 2., 3., np.NaN],
[ 1.5, 2.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_dask_trimesh_implicit_nodes(self):
simplex_df = pd.DataFrame(self.simplexes, columns=['v0', 'v1', 'v2'])
vertex_df = pd.DataFrame(self.vertices_vdim, columns=['x', 'y', 'z'])
simplex_ddf = dd.from_pandas(simplex_df, npartitions=2)
vertex_ddf = dd.from_pandas(vertex_df, npartitions=2)
trimesh = TriMesh((simplex_ddf, vertex_ddf))
ri = rasterize.instance()
img = ri(trimesh, width=3, height=3, dynamic=False, precompute=True)
cache = ri._precomputed
self.assertEqual(len(cache), 1)
self.assertIn(trimesh._plot_id, cache)
self.assertIsInstance(cache[trimesh._plot_id]['mesh'], dd.DataFrame)
array = np.array([
[ 2., 3., np.NaN],
[ 1.5, 2.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_dask_trimesh(self):
simplex_df = pd.DataFrame(self.simplexes_vdim, columns=['v0', 'v1', 'v2', 'z'])
vertex_df = pd.DataFrame(self.vertices, columns=['x', 'y'])
simplex_ddf = dd.from_pandas(simplex_df, npartitions=2)
vertex_ddf = dd.from_pandas(vertex_df, npartitions=2)
tri_nodes = Nodes(vertex_ddf, ['x', 'y', 'index'])
trimesh = TriMesh((simplex_ddf, tri_nodes), vdims=['z'])
ri = rasterize.instance()
img = ri(trimesh, width=3, height=3, dynamic=False, precompute=True)
cache = ri._precomputed
self.assertEqual(len(cache), 1)
self.assertIn(trimesh._plot_id, cache)
self.assertIsInstance(cache[trimesh._plot_id]['mesh'], dd.DataFrame)
array = np.array([
[ 1.5, 1.5, np.NaN],
[ 0.5, 1.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_dask_trimesh_with_node_vdims(self):
simplex_df = pd.DataFrame(self.simplexes, columns=['v0', 'v1', 'v2'])
vertex_df = pd.DataFrame(self.vertices_vdim, columns=['x', 'y', 'z'])
simplex_ddf = dd.from_pandas(simplex_df, npartitions=2)
vertex_ddf = dd.from_pandas(vertex_df, npartitions=2)
tri_nodes = Nodes(vertex_ddf, ['x', 'y', 'index'], ['z'])
trimesh = TriMesh((simplex_ddf, tri_nodes))
ri = rasterize.instance()
img = ri(trimesh, width=3, height=3, dynamic=False, precompute=True)
cache = ri._precomputed
self.assertEqual(len(cache), 1)
self.assertIn(trimesh._plot_id, cache)
self.assertIsInstance(cache[trimesh._plot_id]['mesh'], dd.DataFrame)
array = np.array([
[ 2., 3., np.NaN],
[ 1.5, 2.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_trimesh_node_vdim_precedence(self):
nodes = Points(self.vertices_vdim, vdims=['node_z'])
trimesh = TriMesh((self.simplexes_vdim, nodes), vdims=['z'])
img = rasterize(trimesh, width=3, height=3, dynamic=False)
array = np.array([
[ 2., 3., np.NaN],
[ 1.5, 2.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1), vdims='node_z')
self.assertEqual(img, image)
def test_rasterize_trimesh_node_explicit_vdim(self):
nodes = Points(self.vertices_vdim, vdims=['node_z'])
trimesh = TriMesh((self.simplexes_vdim, nodes), vdims=['z'])
img = rasterize(trimesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))
array = np.array([
[ 1.5, 1.5, np.NaN],
[ 0.5, 1.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_trimesh_zero_range(self):
trimesh = TriMesh((self.simplexes_vdim, self.vertices), vdims=['z'])
img = rasterize(trimesh, x_range=(0, 0), height=2, dynamic=False)
image = Image(([], [0.25, 0.75], np.zeros((2, 0))),
bounds=(0, 0, 0, 1), xdensity=1)
self.assertEqual(img, image)
def test_rasterize_trimesh_vertex_vdims(self):
simplices = [(0, 1, 2), (3, 2, 1)]
vertices = [(0., 0., 1), (0., 1., 2), (1., 0., 3), (1., 1., 4)]
trimesh = TriMesh((simplices, Points(vertices, vdims='z')))
img = rasterize(trimesh, width=3, height=3, dynamic=False)
image = Image(np.array([[2., 3., np.NaN], [1.5, 2.5, np.NaN], [np.NaN, np.NaN, np.NaN]]),
bounds=(0, 0, 1, 1), vdims='z')
self.assertEqual(img, image)
def test_rasterize_trimesh_ds_aggregator(self):
trimesh = TriMesh((self.simplexes_vdim, self.vertices), vdims=['z'])
img = rasterize(trimesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))
array = np.array([
[ 1.5, 1.5, np.NaN],
[ 0.5, 1.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_trimesh_string_aggregator(self):
trimesh = TriMesh((self.simplexes_vdim, self.vertices), vdims=['z'])
img = rasterize(trimesh, width=3, height=3, dynamic=False, aggregator='mean')
array = np.array([
[ 1.5, 1.5, np.NaN],
[ 0.5, 1.5, np.NaN],
[np.NaN, np.NaN, np.NaN]
])
image = Image(array, bounds=(0, 0, 1, 1))
self.assertEqual(img, image)
def test_rasterize_quadmesh(self):
qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 1], [2, 3]])))
img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator=ds.mean('z'))
image = Image(np.array([[2, 3, 3], [2, 3, 3], [0, 1, 1]]),
bounds=(-.5, -.5, 1.5, 1.5))
self.assertEqual(img, image)
def test_rasterize_quadmesh_string_aggregator(self):
qmesh = QuadMesh(([0, 1], [0, 1], np.array([[0, 1], [2, 3]])))
img = rasterize(qmesh, width=3, height=3, dynamic=False, aggregator='mean')
image = Image(np.array([[2, 3, 3], [2, 3, 3], [0, 1, 1]]),
bounds=(-.5, -.5, 1.5, 1.5))
self.assertEqual(img, image)
def test_rasterize_points(self):
points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])
img = rasterize(points, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),
vdims=[Dimension('Count', nodata=0)])
self.assertEqual(img, expected)
def test_rasterize_curve(self):
curve = Curve([(0.2, 0.3), (0.4, 0.7), (0.8, 0.99)])
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [1, 1]]),
vdims=[Dimension('Count', nodata=0)])
img = rasterize(curve, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
self.assertEqual(img, expected)
def test_rasterize_ndoverlay(self):
ds = Dataset([(0.2, 0.3, 0), (0.4, 0.7, 1), (0, 0.99, 2)], kdims=['x', 'y', 'z'])
ndoverlay = ds.to(Points, ['x', 'y'], [], 'z').overlay()
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 0]]),
vdims=[Dimension('Count', nodata=0)])
img = rasterize(ndoverlay, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
self.assertEqual(img, expected)
def test_rasterize_path(self):
path = Path([[(0.2, 0.3), (0.4, 0.7)], [(0.4, 0.7), (0.8, 0.99)]])
expected = Image(([0.25, 0.75], [0.25, 0.75], [[1, 0], [2, 1]]),
vdims=[Dimension('Count', nodata=0)])
img = rasterize(path, dynamic=False, x_range=(0, 1), y_range=(0, 1),
width=2, height=2)
self.assertEqual(img, expected)
def test_rasterize_image(self):
img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T))
regridded = regrid(img, width=2, height=2, dynamic=False)
expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 22]]))
self.assertEqual(regridded, expected)
def test_rasterize_image_string_aggregator(self):
img = Image((range(10), range(5), np.arange(10) * np.arange(5)[np.newaxis].T))
regridded = regrid(img, width=2, height=2, dynamic=False, aggregator='mean')
expected = Image(([2., 7.], [0.75, 3.25], [[1, 5], [6, 22]]))
self.assertEqual(regridded, expected)
class DatashaderSpreadTests(ComparisonTestCase):
def test_spread_rgb_1px(self):
arr = np.array([[[0, 0, 0], [0, 1, 1], [0, 1, 1]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]], dtype=np.uint8).T*255
spreaded = spread(RGB(arr))
arr = np.array([[[0, 0, 1], [0, 0, 1], [0, 0, 1]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]], dtype=np.uint8).T*255
self.assertEqual(spreaded, RGB(arr))
def test_spread_img_1px(self):
if ds_version < '0.12.0':
raise SkipTest('Datashader does not support DataArray yet')
arr = np.array([[0, 0, 0], [0, 0, 0], [1, 1, 1]]).T
spreaded = spread(Image(arr))
arr = np.array([[0, 0, 0], [2, 3, 2], [2, 3, 2]]).T
self.assertEqual(spreaded, Image(arr))
class DatashaderStackTests(ComparisonTestCase):
def setUp(self):
self.rgb1_arr = np.array([[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8).T*255
self.rgb2_arr = np.array([[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[1, 0], [0, 1]]], dtype=np.uint8).T*255
self.rgb1 = RGB(self.rgb1_arr)
self.rgb2 = RGB(self.rgb2_arr)
def test_stack_add_compositor(self):
combined = stack(self.rgb1*self.rgb2, compositor='add')
arr = np.array([[[0, 255, 255], [255, 0, 0]], [[255, 0, 0], [0, 255, 255]]], dtype=np.uint8)
expected = RGB(arr)
self.assertEqual(combined, expected)
def test_stack_over_compositor(self):
combined = stack(self.rgb1*self.rgb2, compositor='over')
self.assertEqual(combined, self.rgb2)
def test_stack_over_compositor_reverse(self):
combined = stack(self.rgb2*self.rgb1, compositor='over')
self.assertEqual(combined, self.rgb1)
def test_stack_saturate_compositor(self):
combined = stack(self.rgb1*self.rgb2, compositor='saturate')
self.assertEqual(combined, self.rgb1)
def test_stack_saturate_compositor_reverse(self):
combined = stack(self.rgb2*self.rgb1, compositor='saturate')
self.assertEqual(combined, self.rgb2)
class GraphBundlingTests(ComparisonTestCase):
def setUp(self):
if ds_version <= '0.7.0':
raise SkipTest('Graph bundling operations require datashader>=0.7.0')
self.source = np.arange(8)
self.target = np.zeros(8)
self.graph = Graph(((self.source, self.target),))
def test_directly_connect_paths(self):
direct = directly_connect_edges(self.graph)._split_edgepaths
self.assertEqual(direct, self.graph.edgepaths)
class InspectorTests(ComparisonTestCase):
"""
Tests for inspector operations
"""
def setUp(self):
points = Points([(0.2, 0.3), (0.4, 0.7), (0, 0.99)])
self.pntsimg = rasterize(points, dynamic=False,
x_range=(0, 1), y_range=(0, 1), width=4, height=4)
if spatialpandas is None:
return
xs1 = [1, 2, 3]; xs2 = [6, 7, 3]; ys1 = [2, 0, 7]; ys2 = [7, 5, 2]
holes = [ [[(1.5, 2), (2, 3), (1.6, 1.6)], [(2.1, 4.5), (2.5, 5), (2.3, 3.5)]],]
polydata = [{'x': xs1, 'y': ys1, 'holes': holes, 'z': 1},
{'x': xs2, 'y': ys2, 'holes': [[]], 'z': 2}]
self.polysrgb = datashade(Polygons(polydata, vdims=['z'],
datatype=['spatialpandas']),
x_range=(0, 7), y_range=(0, 7), dynamic=False)
def tearDown(self):
Tap.x, Tap.y = None, None
def test_inspect_points_or_polygons(self):
if spatialpandas is None:
raise SkipTest('Polygon inspect tests require spatialpandas')
polys = inspect(self.polysrgb,
max_indicators=3, dynamic=False, pixels=1, x=6, y=5)
self.assertEqual(polys, Polygons([{'x': [6, 3, 7], 'y': [7, 2, 5], 'z': 2}], vdims='z'))
points = inspect(self.pntsimg, max_indicators=3, dynamic=False, pixels=1, x=-0.1, y=-0.1)
self.assertEqual(points.dimension_values('x'), np.array([]))
self.assertEqual(points.dimension_values('y'), np.array([]))
def test_points_inspection_1px_mask(self):
points = inspect_points(self.pntsimg, max_indicators=3, dynamic=False, pixels=1, x=-0.1, y=-0.1)
self.assertEqual(points.dimension_values('x'), np.array([]))
self.assertEqual(points.dimension_values('y'), np.array([]))
def test_points_inspection_2px_mask(self):
points = inspect_points(self.pntsimg, max_indicators=3, dynamic=False, pixels=2, x=-0.1, y=-0.1)
self.assertEqual(points.dimension_values('x'), np.array([0.2]))
self.assertEqual(points.dimension_values('y'), np.array([0.3]))
def test_points_inspection_4px_mask(self):
points = inspect_points(self.pntsimg, max_indicators=3, dynamic=False, pixels=4, x=-0.1, y=-0.1)
self.assertEqual(points.dimension_values('x'), np.array([0.2, 0.4]))
self.assertEqual(points.dimension_values('y'), np.array([0.3, 0.7]))
def test_points_inspection_5px_mask(self):
points = inspect_points(self.pntsimg, max_indicators=3, dynamic=False, pixels=5, x=-0.1, y=-0.1)
self.assertEqual(points.dimension_values('x'), np.array([0.2, 0.4, 0]))
self.assertEqual(points.dimension_values('y'), np.array([0.3, 0.7, 0.99]))
def test_inspection_5px_mask_points_df(self):
inspector = inspect.instance(max_indicators=3, dynamic=False, pixels=5,
x=-0.1, y=-0.1)
inspector(self.pntsimg)
self.assertEqual(list(inspector.hits['x']),[0.2,0.4,0.0])
self.assertEqual(list(inspector.hits['y']),[0.3,0.7,0.99])
def test_points_inspection_dict_streams(self):
Tap.x, Tap.y = 0.4, 0.7
points = inspect_points(self.pntsimg, max_indicators=3, dynamic=True,
pixels=1, streams=dict(x=Tap.param.x, y=Tap.param.y))
self.assertEqual(len(points.streams), 1)
self.assertEqual(isinstance(points.streams[0], Tap), True)
self.assertEqual(points.streams[0].x, 0.4)
self.assertEqual(points.streams[0].y, 0.7)
def test_points_inspection_dict_streams_instance(self):
Tap.x, Tap.y = 0.2, 0.3
inspector = inspect_points.instance(max_indicators=3, dynamic=True, pixels=1,
streams=dict(x=Tap.param.x, y=Tap.param.y))
points = inspector(self.pntsimg)
self.assertEqual(len(points.streams), 1)
self.assertEqual(isinstance(points.streams[0], Tap), True)
self.assertEqual(points.streams[0].x, 0.2)
self.assertEqual(points.streams[0].y, 0.3)
def test_polys_inspection_1px_mask_hit(self):
if spatialpandas is None:
raise SkipTest('Polygon inspect tests require spatialpandas')
polys = inspect_polygons(self.polysrgb,
max_indicators=3, dynamic=False, pixels=1, x=6, y=5)
self.assertEqual(polys, Polygons([{'x': [6, 3, 7], 'y': [7, 2, 5], 'z': 2}],
vdims='z'))
def test_inspection_1px_mask_poly_df(self):
if spatialpandas is None:
raise SkipTest('Polygon inspect tests require spatialpandas')
inspector = inspect.instance(max_indicators=3, dynamic=False, pixels=1, x=6, y=5)
inspector(self.polysrgb)
self.assertEqual(len(inspector.hits), 1)
data = [[6.0, 7.0, 3.0, 2.0, 7.0, 5.0, 6.0, 7.0]]
self.assertEqual(inspector.hits.iloc[0].geometry,
spatialpandas.geometry.polygon.Polygon(data))
def test_polys_inspection_1px_mask_miss(self):
if spatialpandas is None:
raise SkipTest('Polygon inspect tests require spatialpandas')
polys = inspect_polygons(self.polysrgb,
max_indicators=3, dynamic=False, pixels=1, x=0, y=0)
self.assertEqual(polys, Polygons([], vdims='z'))
| ioam/holoviews | holoviews/tests/operation/test_datashader.py | Python | bsd-3-clause | 59,238 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .docker_operator import *
from .subdag_operator import *
from .operators import *
from .sensors import *
from .hive_operator import *
from .s3_to_hive_operator import *
| zodiac/incubator-airflow | tests/operators/__init__.py | Python | apache-2.0 | 741 |
# #
# Copyright 2012-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Support for MPICH as toolchain MPI library.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Dmitri Gribenko (National Technical University of Ukraine "KPI")
"""
from distutils.version import LooseVersion
from easybuild.tools.toolchain.constants import COMPILER_VARIABLES, MPI_COMPILER_VARIABLES
from easybuild.tools.toolchain.mpi import Mpi
from easybuild.tools.toolchain.variables import CommandFlagList
TC_CONSTANT_MPICH = "MPICH"
TC_CONSTANT_MPI_TYPE_MPICH = "MPI_TYPE_MPICH"
class Mpich(Mpi):
"""MPICH MPI class"""
MPI_MODULE_NAME = ['MPICH']
MPI_FAMILY = TC_CONSTANT_MPICH
MPI_TYPE = TC_CONSTANT_MPI_TYPE_MPICH
MPI_LIBRARY_NAME = 'mpich'
# version-dependent, so defined at runtime
MPI_COMPILER_MPIF77 = None
MPI_COMPILER_MPIF90 = None
MPI_COMPILER_MPIFC = None
# clear MPI wrapper command options
MPI_SHARED_OPTION_MAP = dict([('_opt_%s' % var, '') for var, _ in MPI_COMPILER_VARIABLES])
def _set_mpi_compiler_variables(self):
"""Set the MPICH_{CC, CXX, F77, F90, FC} variables."""
# determine MPI wrapper commands to use based on MPICH version
if self.MPI_COMPILER_MPIF77 is None and self.MPI_COMPILER_MPIF90 is None and self.MPI_COMPILER_MPIFC is None:
# mpif77/mpif90 for MPICH v3.1.0 and earlier, mpifort for MPICH v3.1.2 and newer
# see http://www.mpich.org/static/docs/v3.1/ vs http://www.mpich.org/static/docs/v3.1.2/
if LooseVersion(self.get_software_version(self.MPI_MODULE_NAME)[0]) >= LooseVersion('3.1.2'):
self.MPI_COMPILER_MPIF77 = 'mpif77'
self.MPI_COMPILER_MPIF90 = 'mpifort'
self.MPI_COMPILER_MPIFC = 'mpifort'
else:
self.MPI_COMPILER_MPIF77 = 'mpif77'
self.MPI_COMPILER_MPIF90 = 'mpif90'
self.MPI_COMPILER_MPIFC = 'mpif90'
# this needs to be done first, otherwise e.g., CC is set to MPICC if the usempi toolchain option is enabled
for var, _ in COMPILER_VARIABLES:
self.variables.nappend('MPICH_%s' % var, str(self.variables[var].get_first()), var_class=CommandFlagList)
super(Mpich, self)._set_mpi_compiler_variables()
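# Illustrative outcome of the version check above (the versions are assumed examples):
# with MPICH 3.0.4 loaded the Fortran wrappers resolve to mpif77/mpif90/mpif90, while
# with MPICH 3.1.2 or newer they resolve to mpif77/mpifort/mpifort.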
| hpcleuven/easybuild-framework | easybuild/toolchains/mpi/mpich.py | Python | gpl-2.0 | 3,346 |
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class CompleteDirectOnlineTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
def run_test(self):
alice = self.nodes[0]
bob = self.nodes[1]
# generate some coins and send them to bob
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Address endpoint not found")
else:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Unknown response")
self.send_bitcoin_cmd("sendtoaddress", address, 10)
time.sleep(20)
# post listing to alice
with open('testdata/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDirectOnlineTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
slug = resp["slug"]
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ipns/" + alice["peerId"] + "/listings.json"
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDirectOnlineTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"address": payment_address,
"amount": payment_amount,
"feeLevel": "NORMAL"
}
api_url = bob["gateway_url"] + "wallet/spend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDirectOnlineTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Alice incorrectly saved as unfunded")
# alice send order fulfillment
with open('testdata/fulfillment.json') as fulfillment_file:
fulfillment_json = json.load(fulfillment_file, object_pairs_hook=OrderedDict)
fulfillment_json["orderId"] = orderId
fulfillment_json["slug"] = slug
api_url = alice["gateway_url"] + "ob/orderfulfillment"
r = requests.post(api_url, data=json.dumps(fulfillment_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Fulfillment post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDirectOnlineTest - FAIL: Fulfillment POST failed. Reason: %s", resp["reason"])
time.sleep(5)
# check bob received fulfillment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "FULFILLED":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Bob failed to detect order fulfillment")
# check alice set fulfillment correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "FULFILLED":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Alice failed to order fulfillment")
# bob send order completion
oc = {
"orderId": orderId,
"ratings": [
{
"slug": slug,
"overall": 4,
"quality": 5,
"description": 5,
"customerService": 4,
"deliverySpeed": 3,
"review": "I love it!"
}
]
}
api_url = bob["gateway_url"] + "ob/ordercompletion"
r = requests.post(api_url, data=json.dumps(oc, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Completion post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteDirectOnlineTest - FAIL: Completion POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# check alice received completion
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "COMPLETED":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Alice failed to detect order completion")
# check bob set completion correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteDirectOnlineTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "COMPLETED":
raise TestFailure("CompleteDirectOnlineTest - FAIL: Bob failed to order completion")
print("CompleteDirectOnlineTest - PASS")
if __name__ == '__main__':
print("Running CompleteDirectOnlineTest")
CompleteDirectOnlineTest().main(["--regtest", "--disableexchangerates"])
| duomarket/openbazaar-test-nodes | qa/complete_direct_online.py | Python | mit | 9,485 |
import numpy
from trefoil.geometry.bbox import BBox
from ncdjango.geoprocessing.data import Raster
from seedsource_core.django.seedsource.tasks.generate_scores import GenerateScores
def test_generate_scores_workflow_sanity():
# Simple
raster_1 = Raster(numpy.reshape(numpy.arange(100), (10, 10)), BBox((0, 0, 10, 10)), 1, 0)
raster_2 = Raster(numpy.reshape(numpy.arange(100, 200), (10, 10)), BBox((0, 0, 10, 10)), 1, 0)
limits = [{'min': 30, 'max': 70}, {'min': 140, 'max': 160}]
GenerateScores()(variables=[raster_1, raster_2], limits=limits, region='test')
def test_generate_scores_workflow_validity():
"""Test simple 2x2 grid against pre-calculated values"""
ahm = Raster(numpy.array([[284, 274], [307, 298]]), BBox((0, 0, 10, 10)), 1, 0)
cmd = Raster(numpy.array([[292, 305], [300, 291]]), BBox((0, 0, 10, 10)), 1, 0)
limits = [{'min': 264, 'max': 304}, {'min': 271, 'max': 311}]
expected_mask = numpy.array([[False, False], [True, False]])
expected_results = numpy.ma.masked_array([[95, 14], [None, 30]], mask=expected_mask)
results = GenerateScores()(variables=[ahm, cmd], limits=limits, region='test')
assert (results['raster_out'].mask == expected_mask).all()
assert (results['raster_out'] == expected_results).all()
| consbio/seedsource-core | seedsource_core/django/seedsource/tests/test_score_calculation.py | Python | bsd-3-clause | 1,290 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NameAvailability(Model):
"""Represents a resource name availability.
:param message: Error Message.
:type message: str
:param name_available: Indicates whether the resource name is available.
:type name_available: bool
:param reason: Reason for name being unavailable.
:type reason: str
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
}
def __init__(self, message=None, name_available=None, reason=None):
self.message = message
self.name_available = name_available
self.reason = reason
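# Minimal construction sketch (keyword names as declared above; the values are made up):
#   availability = NameAvailability(message='', name_available=True, reason='')
#   assert availability.name_available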
| AutorestCI/azure-sdk-for-python | azure-mgmt-rdbms/azure/mgmt/rdbms/postgresql/models/name_availability.py | Python | mit | 1,227 |
import elementary
import evas
import os
class stationWindow(elementary.Box):
def __init__( self, parent ):
        # builds an elementary box listing the available stations
elementary.Box.__init__(self, parent.mainWindow)
self.ourPlayer = parent.ourPlayer
self.win = parent.mainWindow
self.rent = parent
self.lst = lst = elementary.List(self.win)
lst.size_hint_weight_set(evas.EVAS_HINT_EXPAND, evas.EVAS_HINT_EXPAND)
lst.size_hint_align_set(evas.EVAS_HINT_FILL, evas.EVAS_HINT_FILL)
lst.callback_clicked_double_add(self.station_popup)
lst.callback_longpressed_add(self.station_popup)
stations = self.ourPlayer.getStations()
for station in stations:
lst.item_append(str(station['stationName']))
lst.show()
chng = elementary.Button(self.win)
chng.text_set("Change Station")
chng.callback_unpressed_add(self.change_station)
chng.show()
sep = elementary.Separator(self.win)
sep.show()
crt = elementary.Button(self.win)
crt.text = "Create Station"
crt.callback_unpressed_add(lambda x: parent.spawn_create())
crt.show()
ex = elementary.Button(self.win)
ex.text_set("Back")
ex.callback_unpressed_add(lambda x: parent.nf.item_pop())
ex.show()
bbox = elementary.Box(self.win)
bbox.horizontal = True
bbox.pack_end(chng)
bbox.pack_end(sep)
bbox.pack_end(crt)
bbox.pack_end(sep)
bbox.pack_end(ex)
bbox.show()
self.pack_end(lst)
self.pack_end(bbox)
def popup_message(self, message, title, callback=False):
popup = elementary.Popup(self.win)
popup.text = message
popup.part_text_set("title,text", title)
bt = elementary.Button(self.win)
bt.text = "OK"
if callback:
bt.callback_clicked_add(callback, popup)
bt.callback_clicked_add(lambda x: popup.hide())
popup.part_content_set("button1", bt)
popup.show()
def station_popup(self, lst, item):
print lst
print item.text
cp = elementary.Ctxpopup(self.win)
cp.item_append("Play", None, self.change_station)
cp.item_append("Rename", None, self.station_rename)
cp.item_append("Delete", None, self.station_delete)
pos = self.win.evas.pointer_canvas_xy_get()
cp.pos = pos
cp.show()
def station_rename(self, lst, i):
cp = i.widget_get()
cp.dismiss()
item = self.lst.selected_item_get()
self.rent.spawn_rename(item.text)
def station_delete(self, lst, i):
cp = i.widget_get()
cp.dismiss()
item = self.lst.selected_item_get()
print item.text
popup = elementary.Popup(self.win)
popup.text = item.text
popup.part_text_set("title,text", "Really Delete?")
bt = elementary.Button(self.win)
bt.text = "Cancel"
bt.callback_clicked_add(lambda x: popup.hide())
ys = elementary.Button(self.win)
ys.text = "Yes"
ys.callback_clicked_add(self.really_delete, item.text)
ys.callback_clicked_add(lambda x: popup.hide())
popup.part_content_set("button1", bt)
popup.part_content_set("button2", ys)
popup.show()
def really_delete(self, pop, name):
station = self.ourPlayer.getStationFromName(name)
print station
self.ourPlayer.deleteStation(station)
self.rent.nf.item_pop()
def change_station(self, bt=False, i=False):
if i:
cp = i.widget_get()
cp.dismiss()
item = self.lst.selected_item_get()
#self.rent.spawn_player()
if item:
self.ourPlayer.setStation(self.ourPlayer.getStationFromName(item.text))
home = os.path.expanduser("~")
if not os.path.exists("%s/.config/eAndora"%home):
os.makedirs("%s/.config/eAndora"%home)
if os.path.exists("%s/.config/eAndora/stationinfo"%home):
os.remove('%s/.config/eAndora/stationinfo'%home)
f = open('%s/.config/eAndora/stationinfo'%home, 'w')
f.write('%s\n'%item.text)
f.close()
self.ourPlayer.pauseSong()
self.ourPlayer.clearSongs()
self.ourPlayer.addSongs()
self.ourPlayer.gui.refreshInterface(True)
self.rent.nf.item_pop()
| JeffHoogland/eandora | eAndora/stationWindow.py | Python | bsd-3-clause | 4,504 |
from distutils.core import setup, Extension
include_dirs = []
library_dirs = []
libraries = []
runtime_library_dirs = []
extra_objects = []
define_macros = []
setup(name = "villa",
version = "0.1",
author = "Li Guangming",
license = "MIT",
url = "https://github.com/cute/villa",
packages = ["villa"],
ext_package = "villa",
ext_modules = [Extension( name = "villa",
sources = [
"src/pyvilla.c",
"src/depot.c",
"src/cabin.c",
"src/myconf.c",
"src/villa.c",
],
include_dirs = include_dirs,
library_dirs = library_dirs,
runtime_library_dirs = runtime_library_dirs,
libraries = libraries,
extra_objects = extra_objects,
define_macros = define_macros
)],
)
| cute/villa | setup.py | Python | mit | 1,149 |
from datetime import time
from pytest import approx
from libretime_shared.datetime import time_in_milliseconds, time_in_seconds
def test_time_in_seconds():
value = time(hour=0, minute=3, second=34, microsecond=649600)
assert time_in_seconds(value) == approx(214.65, abs=0.009)
def test_time_in_milliseconds():
value = time(hour=0, minute=0, second=0, microsecond=500000)
assert time_in_milliseconds(value) == 500
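# Worked check of the expected values above: 3 min * 60 + 34 s + 649600 us / 1e6
# = 214.6496 s, which is within approx(214.65, abs=0.009); 500000 us / 1000 = 500 ms.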
| LibreTime/libretime | shared/tests/datetime_test.py | Python | agpl-3.0 | 435 |
from . import time_weekday
from . import time_window_mixin
| OCA/server-tools | base_time_window/models/__init__.py | Python | agpl-3.0 | 59 |
from builtins import object
from collections import namedtuple
from itertools import combinations
import numpy as np
class KendallColijn(object):
"""
Data structure that stores info about a tree
that is needed to compute the Kendall-Colijn
tree distance metric - i.e. the vectors
m and M
"""
def __init__(self, tree):
"""
Initialise the data structure, compute m and M.
"""
info = self._precompute(tree._tree)
m, M = self._get_vectors(tree._tree, info)
self.little_m = m
self.big_m = M
self.tree = tree
def _equalise_leaf_sets(self, other, inplace):
t1 = self.tree
t2 = other.tree
intersect = t1 & t2
if t1.labels != intersect:
pruned1 = t1.prune_to_subset(intersect, inplace)
else:
pruned1 = t1
if t2.labels != intersect:
pruned2 = t2.prune_to_subset(intersect, inplace)
else:
pruned2 = t2
return pruned1, pruned2
def _precompute(self, tree):
"""
Collect metric info in a single preorder traversal.
"""
d = {}
for n in tree.preorder_internal_node_iter():
d[n] = namedtuple('NodeDist', ['dist_from_root', 'edges_from_root'])
if n.parent_node:
d[n].dist_from_root = d[n.parent_node].dist_from_root + n.edge_length
d[n].edges_from_root = d[n.parent_node].edges_from_root + 1
else:
d[n].dist_from_root = 0.0
d[n].edges_from_root = 0
return d
def _get_vectors(self, tree, precomputed_info):
"""
Populate the vectors m and M.
"""
little_m = []
big_m = []
leaf_nodes = sorted(tree.leaf_nodes(), key=lambda x: x.taxon.label)
# inner nodes, sorted order
for leaf_a, leaf_b in combinations(leaf_nodes, 2):
mrca = tree.mrca(taxa=[leaf_a.taxon, leaf_b.taxon])
little_m.append(precomputed_info[mrca].edges_from_root)
big_m.append(precomputed_info[mrca].dist_from_root)
# leaf nodes, sorted order
for leaf in leaf_nodes:
little_m.append(1)
big_m.append(leaf.edge_length)
return np.array(little_m), np.array(big_m)
def get_vector(self, lbda=0.5):
"""
The vector v is the weighted average of m and M.
lbda, a.k.a. lambda, is the weighting parameter.
"""
return (1-lbda)*self.little_m + lbda*self.big_m
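    # Worked example (made-up values): with little_m = [1, 1, 2] and
    # big_m = [0.5, 0.7, 1.1], get_vector(0.5) yields 0.5*m + 0.5*M = [0.75, 0.85, 1.55].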
def get_distance(self, other, lbda=0.5, min_overlap=4):
"""
Return the Euclidean distance between vectors v of
two trees. Must have the same leaf set (too lazy to check).
"""
if self.tree ^ other.tree:
if len(self.tree & other.tree) < min_overlap:
return 0
# raise AttributeError('Can\'t calculate tree distances when tree overlap is less than two leaves')
else:
t1, t2 = self._equalise_leaf_sets(other, False)
tmp_self = KendallColijn(t1)
tmp_other = KendallColijn(t2)
return np.sqrt(((tmp_self.get_vector(lbda) - tmp_other.get_vector(lbda)) ** 2).sum())
else:
return np.sqrt(((self.get_vector(lbda) - other.get_vector(lbda)) ** 2).sum())
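# Usage sketch (illustrative; assumes t1 and t2 are treeCl Tree objects over the
# same leaf set, since the constructor only needs the wrapped dendropy tree):
#
#   kc1, kc2 = KendallColijn(t1), KendallColijn(t2)
#   d_topo = kc1.get_distance(kc2, lbda=0.0)   # topology only (vector m)
#   d_blen = kc1.get_distance(kc2, lbda=1.0)   # branch lengths only (vector M)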
| DessimozLab/treeCl | treeCl/utils/kendallcolijn.py | Python | mit | 3,393 |
# -*- coding: utf-8 -*-
from io import BytesIO
from pytest import raises
from translate.misc.multistring import multistring
from translate.storage import base, jsonl10n, test_monolingual
JSON_I18NEXT = b"""{
"key": "value",
"keyDeep": {
"inner": "value"
},
"keyPluralSimple": "the singular",
"keyPluralSimple_plural": "the plural",
"keyPluralMultipleEgArabic_0": "the plural form 0",
"keyPluralMultipleEgArabic_1": "the plural form 1",
"keyPluralMultipleEgArabic_2": "the plural form 2",
"keyPluralMultipleEgArabic_3": "the plural form 3",
"keyPluralMultipleEgArabic_4": "the plural form 4",
"keyPluralMultipleEgArabic_5": "the plural form 5"
}
"""
JSON_I18NEXT_PLURAL = b"""{
"key": "value",
"keyDeep": {
"inner": "value"
},
"keyPluralSimple": "Ahoj",
"keyPluralMultipleEgArabic": "Nazdar"
}
"""
JSON_ARRAY = b"""{
"key": [
"One",
"Two",
"Three"
]
}
"""
class TestJSONResourceUnit(test_monolingual.TestMonolingualUnit):
UnitClass = jsonl10n.JsonUnit
class TestJSONResourceStore(test_monolingual.TestMonolingualStore):
StoreClass = jsonl10n.JsonFile
def test_serialize(self):
store = self.StoreClass()
store.parse('{"key": "value"}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": "value"\n}\n'
def test_error(self):
store = self.StoreClass()
with raises(base.ParseError):
store.parse('{"key": "value"')
def test_filter(self):
store = self.StoreClass(filter=['key'])
store.parse('{"key": "value", "other": "second"}')
assert len(store.units) == 1
assert store.units[0].source == 'value'
def test_ordering(self):
store = self.StoreClass()
store.parse('''{
"foo": "foo",
"bar": "bar",
"baz": "baz"
}''')
assert store.units[0].source == 'foo'
assert store.units[2].source == 'baz'
def test_args(self):
store = self.StoreClass()
store.parse('''{
"foo": "foo",
"bar": "bar",
"baz": "baz"
}''')
store.dump_args['sort_keys'] = True
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'''{
"bar": "bar",
"baz": "baz",
"foo": "foo"
}
'''
def test_bom(self):
content = "{}\n".encode("utf-8-sig")
store = self.StoreClass()
store.parse(content)
assert len(store.units) == 0
out = BytesIO()
store.serialize(out)
assert out.getvalue() == content
class TestJSONNestedResourceStore(test_monolingual.TestMonolingualUnit):
StoreClass = jsonl10n.JsonNestedFile
def test_serialize(self):
store = self.StoreClass()
store.parse('{"key": {"second": "value"}}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "second": "value"\n }\n}\n'
def test_ordering(self):
data = b'''{
"foo": "foo",
"bar": {
"ba1": "bag",
"ba2": "bag",
"ba3": "bag",
"ba4": "baz"
}
}
'''
store = self.StoreClass()
store.parse(data)
assert store.units[0].source == 'foo'
assert store.units[1].getid() == '.bar.ba1'
assert store.units[2].getid() == '.bar.ba2'
assert store.units[3].getid() == '.bar.ba3'
assert store.units[4].getid() == '.bar.ba4'
out = BytesIO()
store.serialize(out)
assert out.getvalue() == data
def test_array(self):
store = self.StoreClass()
store.parse(JSON_ARRAY)
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_ARRAY
class TestWebExtensionUnit(test_monolingual.TestMonolingualUnit):
UnitClass = jsonl10n.WebExtensionJsonUnit
class TestWebExtensionStore(test_monolingual.TestMonolingualStore):
StoreClass = jsonl10n.WebExtensionJsonFile
def test_serialize(self):
store = self.StoreClass()
store.parse('{"key": {"message": "value", "description": "note"}}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "message": "value",\n "description": "note"\n }\n}\n'
def test_serialize_no_description(self):
store = self.StoreClass()
store.parse('{"key": {"message": "value"}}')
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "message": "value"\n }\n}\n'
def test_set_target(self):
store = self.StoreClass()
store.parse('{"key": {"message": "value", "description": "note"}}')
store.units[0].target = 'another'
out = BytesIO()
store.serialize(out)
assert out.getvalue() == b'{\n "key": {\n "message": "another",\n "description": "note"\n }\n}\n'
def test_placeholders(self):
DATA = """{
"youCanClose": {
"message": "Bravo ! Votre compte $SITE$ est relié à Scrobbly. Vous pouvez fermer et revenir en arrière",
"placeholders": {
"site": {
"content": "$1",
"example": "AniList"
}
}
}
}
""".encode('utf-8')
store = self.StoreClass()
store.parse(DATA)
assert store.units[0].placeholders is not None
out = BytesIO()
store.serialize(out)
assert out.getvalue() == DATA
class TestI18NextStore(test_monolingual.TestMonolingualStore):
StoreClass = jsonl10n.I18NextFile
def test_serialize(self):
store = self.StoreClass()
store.parse(JSON_I18NEXT)
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_I18NEXT
def test_units(self):
store = self.StoreClass()
store.parse(JSON_I18NEXT)
assert len(store.units) == 4
def test_plurals(self):
store = self.StoreClass()
store.parse(JSON_I18NEXT)
# Remove plurals
store.units[2].target = 'Ahoj'
store.units[3].target = 'Nazdar'
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_I18NEXT_PLURAL
# Bring back plurals
store.units[2].target = multistring([
"the singular",
"the plural",
])
store.units[3].target = multistring([
"the plural form 0",
"the plural form 1",
"the plural form 2",
"the plural form 3",
"the plural form 4",
"the plural form 5"
])
out = BytesIO()
store.serialize(out)
assert out.getvalue() == JSON_I18NEXT
def test_new_plural(self):
EXPECTED = b'''{
"simple": "the singular",
"simple_plural": "the plural",
"complex_0": "the plural form 0",
"complex_1": "the plural form 1",
"complex_2": "the plural form 2",
"complex_3": "the plural form 3",
"complex_4": "the plural form 4",
"complex_5": "the plural form 5"
}
'''
store = self.StoreClass()
unit = self.StoreClass.UnitClass(
multistring([
"the singular",
"the plural",
]),
'simple'
)
store.addunit(unit)
unit = self.StoreClass.UnitClass(
multistring([
"the plural form 0",
"the plural form 1",
"the plural form 2",
"the plural form 3",
"the plural form 4",
"the plural form 5"
]),
'complex'
)
store.addunit(unit)
out = BytesIO()
store.serialize(out)
assert out.getvalue() == EXPECTED
| unho/translate | translate/storage/test_jsonl10n.py | Python | gpl-2.0 | 7,825 |
from setuptools import setup, find_packages
import sys, os
'''
NOTE: For versioning, use "MAJOR.MINOR.REVISION.BUILDNUMBER"! By Questor
http://programmers.stackexchange.com/questions/24987/what-exactly-is-the-build-number-in-major-minor-buildnumber-revision
'''
setup(name='liblightbase',
version='0.3.3.0',
description="LightBase Library",
long_description="""\
LightBase Library""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='lightbase-neo ligthbase json database library',
author='Lightbase',
author_email='[email protected]',
url='http://lightbase.com.br/',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
'voluptuous == 0.8.7',
'ply == 3.4',
'decorator == 3.4.0',
'requests == 2.3.0',
'python-dateutil == 2.2',
'six == 1.7.2',
'jsonpath-rw == 1.3.0'])
| lightbase/liblightbase | setup.py | Python | gpl-2.0 | 995 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='SkipRequest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=64, verbose_name='Sender Key')),
],
options={
'verbose_name': 'Skip request',
'verbose_name_plural': 'Skip requests',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.TextField(help_text='Description text for the video', verbose_name='Description', blank=True)),
('youtube_url', models.URLField(help_text='URL to a youtube video', verbose_name='Youtube URL')),
('key', models.CharField(max_length=64, null=True, verbose_name='Sender Key', blank=True)),
('deleted', models.IntegerField(default=False, verbose_name='Deleted')),
('playing', models.BooleanField(default=False, verbose_name='Playing')),
('duration', models.IntegerField(default=0, verbose_name='Duration')),
],
options={
'verbose_name': 'Video',
'verbose_name_plural': 'Videos',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='skiprequest',
name='event',
field=models.ForeignKey(verbose_name='Video', to='manager.Video'),
preserve_default=True,
),
]
| katajakasa/utuputki | Utuputki/manager/migrations/0001_initial.py | Python | mit | 1,910 |
from itertools import chain
from multiqc.plots import linegraph
def plot_indelhist(samples, file_type, **plot_args):
"""Create line graph plot of histogram data for BBMap 'indelhist' output.
The 'samples' parameter could be from the bbmap mod_data dictionary:
samples = bbmap.MultiqcModule.mod_data[file_type]
"""
all_x = set()
for item in sorted(chain(*[samples[sample]["data"].items() for sample in samples])):
all_x.add(item[0])
columns_to_plot = {
"Deletions": {
0: "Count",
},
"Insertions": {
1: "Count",
},
}
plot_data = []
for column_type in columns_to_plot:
plot_data.append(
{
sample
+ "."
+ column_name: {
x: samples[sample]["data"][x][column] if x in samples[sample]["data"] else 0 for x in all_x
}
for sample in samples
for column, column_name in columns_to_plot[column_type].items()
}
)
plot_params = {
"id": "bbmap-" + file_type + "_plot",
"title": "BBTools: " + plot_args["plot_title"],
"xlab": "Indel size",
"ylab": "Insertion count",
"data_labels": [
{"name": "Insertions", "ylab": "Insertion count"},
{"name": "Deletions", "ylab": "Deletion count"},
],
}
plot_params.update(plot_args["plot_params"])
plot = linegraph.plot(plot_data, plot_params)
return plot
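# Illustrative input shape (assumed from how the keys are indexed above; the sample
# name and counts are made up):
#
#   samples = {
#       'sample1': {'data': {1: [12, 7], 2: [3, 1]}},  # indel size -> per-column counts
#   }
#   plot = plot_indelhist(samples, 'indelhist',
#                         plot_title='Indel lengths', plot_params={})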
| ewels/MultiQC | multiqc/modules/bbmap/plot_indelhist.py | Python | gpl-3.0 | 1,537 |
###############################################################################
##
## A sample device driver v0.01
## @Copyright 2014 MySensors Research Project
## SCoRe Lab (www.scorelab.org)
## University of Colombo School of Computing
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from random import randint
import datetime
import time  # used by time.sleep() in handleON/handleOFF below
#following variables will ve used to handle the GPIO ports
GPIOLock=False
GPIOStatus=[False,False,False,False,False,False,False,False]
class myDriver:
def __init__(self):
        global GPIOLock
        GPIOLock = False
#Set the public and private key locations
#In order to read the sensor values, separate functions should be implemented.
#For example, if temperature sensor is available, read the value return it
def readTp(self):
return randint(0,50)
#If GPS is available, read the value return it
def readGPS(self):
return randint(0,1000)
#Read the device time and return it
def readTime(self):
now = datetime.datetime.now()
#print now.hour,now.minute,now.second
return '%s,%s,%s' %(now.hour,now.minute,now.second)
#Read the GPIO port status and return it
def readGPIO(self,port):
global GPIOStatus
if GPIOStatus[port]:return 'ON'
else: return 'OFF'
#In order to handle GPIO ports, following functions should be implemented.
#For example, if Senze-> PUT #gpio2 @device, then switch 2 will be turned ON.
def handleON(self,port):
global GPIOLock
global GPIOStatus
#This function should implement the necessary action before it returns the value
#Wait if someone is accessing the gpio ports
c=1
while(GPIOLock):
time.sleep(c)
c+=1
if c>10: return 'ERROR'
GPIOLock=True
#Here we should include the function to turn on the switch
# TURN_ON()
GPIOStatus[port]=True
GPIOLock=False
return 'ON'
#For example, if Senze -> :PUT #gpio2 @device, then switch will be turned OFF
def handleOFF(self,port):
global GPIOLock
global GPIOStatus
#This function should implement necessary action before it returns the value
#Wait if someone is accessing the device
c=1
while(GPIOLock):
time.sleep(c)
c+=1
if c>10: return 'ERROR'
GPIOLock=True
#Here we should include the function to turn off the switch
# TURN_OFF()
GPIOStatus[port]=False
GPIOLock=False
return 'OFF'
sen=myDriver()
print sen.handleON(1)
print sen.handleOFF(1)
print sen.readTp()
print sen.readGPS()
print sen.readTime()
| kasundezoysa/senze | testv/myDriver.py | Python | apache-2.0 | 3,282 |
# encoding: utf-8
"""
Step implementations for section-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.shared import Inches
from helpers import test_docx
# given ====================================================
@given('a section having known page dimension')
def given_a_section_having_known_page_dimension(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[-1]
@given('a section having known page margins')
def given_a_section_having_known_page_margins(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[0]
@given('a section having start type {start_type}')
def given_a_section_having_start_type(context, start_type):
section_idx = {
'CONTINUOUS': 0,
'NEW_PAGE': 1,
'ODD_PAGE': 2,
'EVEN_PAGE': 3,
'NEW_COLUMN': 4,
}[start_type]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
@given('a section known to have {orientation} orientation')
def given_a_section_having_known_orientation(context, orientation):
section_idx = {
'landscape': 0,
'portrait': 1
}[orientation]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
# when =====================================================
@when('I set the {margin_side} margin to {inches} inches')
def when_I_set_the_margin_side_length(context, margin_side, inches):
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
new_value = Inches(float(inches))
setattr(context.section, prop_name, new_value)
@when('I set the section orientation to {orientation}')
def when_I_set_the_section_orientation(context, orientation):
new_orientation = {
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'None': None,
}[orientation]
context.section.orientation = new_orientation
@when('I set the section page height to {y} inches')
def when_I_set_the_section_page_height_to_y_inches(context, y):
context.section.page_height = Inches(float(y))
@when('I set the section page width to {x} inches')
def when_I_set_the_section_page_width_to_x_inches(context, x):
context.section.page_width = Inches(float(x))
@when('I set the section start type to {start_type}')
def when_I_set_the_section_start_type_to_start_type(context, start_type):
new_start_type = {
'None': None,
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
context.section.start_type = new_start_type
# then =====================================================
@then('the reported {margin_side} margin is {inches} inches')
def then_the_reported_margin_is_inches(context, margin_side, inches):
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
expected_value = Inches(float(inches))
actual_value = getattr(context.section, prop_name)
assert actual_value == expected_value
@then('the reported page orientation is {orientation}')
def then_the_reported_page_orientation_is_orientation(context, orientation):
expected_value = {
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
}[orientation]
assert context.section.orientation == expected_value
@then('the reported page width is {x} inches')
def then_the_reported_page_width_is_width(context, x):
assert context.section.page_width == Inches(float(x))
@then('the reported page height is {y} inches')
def then_the_reported_page_height_is_y_inches(context, y):
assert context.section.page_height == Inches(float(y))
@then('the reported section start type is {start_type}')
def then_the_reported_section_start_type_is_type(context, start_type):
expected_start_type = {
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
assert context.section.start_type == expected_start_type
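# Illustrative Gherkin scenario (assumed feature-file phrasing) that the step
# implementations above would match:
#
#   Given a section having known page margins
#   When I set the left margin to 1.5 inches
#   Then the reported left margin is 1.5 inches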
| LuoZijun/uOffice | temp/pydocxx/features/steps/section.py | Python | gpl-3.0 | 4,958 |
# coding: utf-8
import unittest
from landchina.spiders.deal import Mapper
from scrapy.http import Request
class UrlMapperTestCase(unittest.TestCase):
def setUp(self):
self.obj = Mapper()
def tearDown(self):
self.obj = None
def test_url_map(self):
prvns = {u'江苏省': '32'}
start_time = '2009-1-1'
end_time = '2009-3-1'
for prvn in self.obj.iterprvn(prvns):
for url in self.obj.iter_url(prvn, start_time, end_time):
self.assertEqual(url is not None, True)
def test_req_map(self):
prvns = {u'江苏省': '32'}
start_time = '2009-1-1'
end_time = '2009-3-1'
for prvn in self.obj.iterprvn(prvns):
for url in self.obj.iter_url(prvn, start_time, end_time):
req = Request(url)
self.assertEqual(isinstance(req, Request), True)
def test_cell_url_map(self):
for url in self.obj.iter_cell_url():
print url
| sundiontheway/landchina-spider | tests/test_url.py | Python | mit | 997 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to construct a TF subgraph implementing distributed All-Reduce."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib import nccl
from tensorflow.python.framework import device as device_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
def _flatten_tensors(tensors):
"""Check tensors for isomorphism and flatten.
Args:
tensors: list of T @{tf.Tensor} which must all have the same shape.
Returns:
tensors: a list of T @{tf.Tensor} which are flattened (1D) views of tensors
shape: the original shape of each element of input tensors
Raises:
ValueError: tensors are empty or non-isomorphic or have unknown shape.
"""
if not tensors:
raise ValueError("tensors cannot be empty")
shape = tensors[0].shape
for tensor in tensors:
shape = shape.merge_with(tensor.shape)
if not shape.is_fully_defined():
raise ValueError("Tensors must have statically known shape.")
if len(shape) != 1:
reshaped = []
for t in tensors:
with ops.colocate_with(t):
reshaped.append(array_ops.reshape(t, [-1]))
tensors = reshaped
return tensors, shape
def _reshape_tensors(tensors, shape):
"""Reshape tensors flattened by _flatten_tensors.
Args:
tensors: list of T @{tf.Tensor} of identical length 1D tensors.
shape: list of integers describing the desired shape. Product of
the elements must equal the length of each tensor.
Returns:
list of T @{tf.Tensor} which are the reshaped inputs.
"""
reshaped = []
for t in tensors:
with ops.colocate_with(t):
reshaped.append(array_ops.reshape(t, shape))
return reshaped
def _padded_split(tensor, pieces):
"""Like split for 1D tensors but pads-out case where len % pieces != 0.
Args:
tensor: T @{tf.Tensor} that must be 1D.
pieces: a positive integer specifying the number of pieces into which
tensor should be split.
Returns:
list of T @{tf.Tensor} of length pieces, which hold the values of
    the input tensor, in order. The final tensor may
be zero-padded on the end to make its size equal to those of all
of the other tensors.
Raises:
ValueError: The input tensor is not 1D.
"""
shape = tensor.shape
if 1 != len(shape):
raise ValueError("input tensor must be 1D")
tensor_len = shape[0].value
with ops.colocate_with(tensor):
if tensor_len % pieces != 0:
# pad to an even length
chunk_size = 1 + tensor_len // pieces
if pieces > tensor_len:
# This is an edge case that should not come up in practice,
# i.e. a different reduction algorithm would be better,
# but we'll make it work just for completeness.
pad_len = pieces - tensor_len
extended_whole = array_ops.concat(
[tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)
parts = array_ops.split(extended_whole, pieces)
return parts, pad_len
elif (pieces - 1) * chunk_size >= tensor_len:
# Another edge case of limited real interest.
pad_len = (pieces * chunk_size) % tensor_len
extended_whole = array_ops.concat(
[tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)
parts = array_ops.split(extended_whole, pieces)
return parts, pad_len
else:
last_chunk_size = tensor_len - (pieces - 1) * chunk_size
pad_len = chunk_size - last_chunk_size
piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]
parts = array_ops.split(tensor, piece_lens)
parts[-1] = array_ops.concat(
[parts[-1], array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)
return parts, pad_len
else:
return array_ops.split(tensor, pieces), 0
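# Worked example for _padded_split (values are illustrative): a 1D tensor of
# length 10 split into 3 pieces gives chunk_size = 1 + 10 // 3 = 4, so the pieces
# have lengths [4, 4, 2]; the last piece is zero-padded to length 4 and the
# function returns (parts, pad_len=2).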
def _strip_padding(tensors, pad_len):
"""Strip the suffix padding added by _padded_split.
Args:
tensors: list of T @{tf.Tensor} of identical length 1D tensors.
pad_len: number of elements to be stripped from the end of each tensor.
Returns:
list of T @{tf.Tensor} which are the stripped inputs.
Raises:
ValueError: tensors must be a non-empty list of 1D tensors, and
each must be longer than pad_len.
"""
if not tensors:
raise ValueError("tensors cannot be empty")
shape = tensors[0].shape
if len(shape) > 1:
raise ValueError("tensors must be 1D")
prefix_len = int(shape[0] - pad_len)
if prefix_len < 0:
raise ValueError("pad_len longer than tensor")
stripped = []
for t in tensors:
with ops.colocate_with(t):
stripped.append(array_ops.slice(t, [0], [prefix_len]))
return stripped
def _ragged_split(tensor, pieces):
"""Like split for 1D tensors but allows case where len % pieces != 0.
Args:
tensor: T @{tf.Tensor} that must be 1D.
pieces: a positive integer specifying the number of pieces into which
tensor should be split.
Returns:
list of T @{tf.Tensor} of length pieces, which hold the values of
    the input tensor, in order. The final tensor may be longer
    than the others, which will all be of equal length.
Raises:
ValueError: input tensor must be 1D.
"""
shape = tensor.shape
if 1 != len(shape):
raise ValueError("input tensor must be 1D")
tensor_len = shape[0].value
chunk_size = tensor_len // pieces
with ops.colocate_with(tensor):
if tensor_len != (pieces * chunk_size):
# last piece will be short
assert pieces > 1
last_chunk_size = tensor_len - ((pieces - 1) * chunk_size)
assert last_chunk_size > 0
piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]
return array_ops.split(tensor, piece_lens)
else:
return array_ops.split(tensor, pieces)
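# Worked example for _ragged_split (values are illustrative): a 1D tensor of
# length 10 split into 3 pieces uses chunk_size = 10 // 3 = 3 and yields pieces
# of lengths [3, 3, 4], with no padding applied.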
def _ring_permutations(num_workers, num_subchunks, gpu_perm):
""""Generate an array of device index arrays, one for each subchunk.
In the basic ring reduction algorithm there are size(T)/num_devices
data chunks and each device process one chunk per tick, i.e. sending
one chunk and receiving one chunk. The idea of subchunking is that
each device processes num_subchunks smaller data regions per tick,
and the ring rank permutation is different for each subchunk index
so that a device is potentially sending to and receiving from
num_subchunks different other devices at each tick. Where multiple
independent data channels exist between devices, this strategy
supplies a method of using them in parallel.
Args:
num_workers: number of worker tasks
num_subchunks: number of subchunks into which to divide each per-GPU chunk.
gpu_perm: an array of integers in [0, num_gpus-1] giving the default
ring order of GPUs at each worker. Other permutations will be generated
by rotating this array and splicing together per-worker instances.
Raises:
ValueError: the number of subchunks may not exceed the number of GPUs.
Returns:
pred_by_s_d: list of lists that maps (by index) from (subchunk, dev) to
preceding device in the permutation for that subchunk. The
device index of GPU i at worker j is i + (j * num_gpus).
rank_by_s_d: list of lists that maps (by index) from (subchunk, dev) to
local rank of device d in the permutation for that subchunk.
"""
num_gpus = len(gpu_perm)
devices = num_workers * num_gpus
if devices == 0:
return [], []
if num_subchunks > num_gpus:
raise ValueError(
"num_subchunks %d must be <= num_gpus %d" % (num_subchunks, num_gpus))
rotation_interval = max(1, int(num_gpus / num_subchunks))
perms_by_s = []
for s in range(0, num_subchunks):
full_order = []
offset = s * rotation_interval
for w in range(0, num_workers):
default_order = [(w * num_gpus) + i for i in gpu_perm]
dev_order = default_order[offset:] + default_order[:offset]
full_order += dev_order
perms_by_s.append(full_order)
pred_by_s_d = [[-1 for d in range(0, devices)]
for s in range(0, num_subchunks)]
rank_by_s_d = [[-1 for d in range(0, devices)]
for s in range(0, num_subchunks)]
for s in range(0, num_subchunks):
for d in range(0, devices):
for t in range(0, devices):
if d == perms_by_s[s][t]:
rank_by_s_d[s][d] = t
pred_by_s_d[s][d] = perms_by_s[s][(t + devices - 1) % devices]
break
return (pred_by_s_d, rank_by_s_d)
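# Worked example for _ring_permutations (a small assumed configuration):
# num_workers=2, num_subchunks=1, gpu_perm=[0, 1] gives a single ring over the
# four devices [0, 1, 2, 3], so pred_by_s_d == [[3, 0, 1, 2]] (each device is
# preceded by the previous one, wrapping around) and rank_by_s_d == [[0, 1, 2, 3]].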
def build_ring_all_reduce(input_tensors, num_workers, num_subchunks,
gpu_perm, red_op, un_op=None):
"""Construct a subgraph performing a ring-style all-reduce of input_tensors.
Args:
input_tensors: a list of T @{tf.Tensor} objects, which must all
have the same shape and type.
num_workers: number of worker tasks spanned by input_tensors.
num_subchunks: number of subchunks each device should process in one tick.
gpu_perm: a list of ints giving a ring-wise rank ordering of GPUs at
each worker. All workers must have the same number of
GPUs with the same rank ordering. If NVLINK is available, this should
be a ring order supported by NVLINK edges.
red_op: a binary operator for elementwise reduction.
un_op: an optional unary operator to apply to fully reduced values.
Raises:
ValueError: empty input_tensors or they don't all have same
size.
Returns:
a list of T @{tf.Tensor} identical sum-reductions of input_tensors.
"""
if len(input_tensors) < 2:
raise ValueError("input_tensors must be length 2 or longer")
input_tensors, shape = _flatten_tensors(input_tensors)
devices = [t.device for t in input_tensors]
(pred_by_s_d, rank_by_s_d) = _ring_permutations(
num_workers, num_subchunks, gpu_perm)
chunks_by_dev, pad_len = _build_ring_gather(
input_tensors, devices,
num_subchunks, pred_by_s_d, rank_by_s_d, red_op)
if un_op:
chunks_by_dev = _apply_unary_to_chunks(un_op, chunks_by_dev)
output_tensors = _build_ring_scatter(pred_by_s_d, rank_by_s_d,
chunks_by_dev)
if pad_len > 0:
output_tensors = _strip_padding(output_tensors, pad_len)
if len(shape) != 1:
output_tensors = _reshape_tensors(output_tensors, shape)
return output_tensors
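# Usage sketch (illustrative; assumes TF 1.x graph mode with one worker, two GPUs,
# and that `per_gpu` holds one same-shaped tensor placed on each GPU):
#
#   reduced = build_ring_all_reduce(per_gpu, num_workers=1, num_subchunks=1,
#                                   gpu_perm=[0, 1], red_op=math_ops.add)
#
# Each element of `reduced` is the elementwise sum of all inputs and lives on the
# device of the corresponding input tensor.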
def _build_ring_gather(input_tensors, devices, num_subchunks,
pred_by_s_d, rank_by_s_d, red_op):
"""Construct a subgraph for the first (reduction) pass of ring all-reduce.
Args:
input_tensors: a list of T @{tf.Tensor} 1D input tensors of same
shape and type.
devices: array of device name strings
num_subchunks: number of subchunks each device should process in one tick.
pred_by_s_d: as produced by _ring_permutations
rank_by_s_d: as produced by _ring_permutations
red_op: a binary operator for elementwise reduction
Raises:
ValueError: tensors must all be one dimensional.
Returns:
list of list of T @{tf.Tensor} of (partially) reduced values where
exactly num_subchunks chunks at each device are fully reduced.
"""
num_devices = len(input_tensors)
if num_devices == 0:
return []
if num_devices == 1:
return input_tensors
shape = input_tensors[0].shape
if 1 != len(shape):
raise ValueError("input tensors must be 1D")
num_chunks = num_devices * num_subchunks
num_ticks = num_devices - 1
# Initialize chunks_by_dev with splits of the input tensors.
chunks_by_dev = []
split_pad_len = 0
for d in range(0, num_devices):
with ops.device(devices[d]):
splits, split_pad_len = _padded_split(input_tensors[d], num_chunks)
chunks_by_dev.append(splits)
# Reduction phase
for tick in range(0, num_ticks):
# One new partial reduction for every chunk
new_partial_reductions = [None for _ in range(0, num_chunks)]
# Compute reductions with respect to last tick's values
for d in range(0, num_devices):
with ops.device(devices[d]):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (2 + tick)) % num_devices
pred_dev = pred_by_s_d[s][d]
chunk_index = (seg_index * num_subchunks) + s
new_partial_reductions[chunk_index] = red_op(
chunks_by_dev[pred_dev][chunk_index],
chunks_by_dev[d][chunk_index])
# Update chunks_by_dev with the new values at the end of the tick.
for d in range(0, num_devices):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (2 + tick)) % num_devices
chunk_index = (seg_index * num_subchunks) + s
chunks_by_dev[d][chunk_index] = new_partial_reductions[chunk_index]
return chunks_by_dev, split_pad_len
def _apply_unary_to_chunks(f, chunks_by_dev):
"""Apply a unary op to each tensor in chunks_by_dev, on same device.
Args:
f: a unary function over T @{tf.Tensor}.
chunks_by_dev: list of lists of T @{tf.Tensor}.
Returns:
new list of lists of T @{tf.Tensor} with the same structure as
chunks_by_dev containing the derived tensors.
"""
output = []
for x in chunks_by_dev:
with ops.colocate_with(x[0]):
output.append([f(t) for t in x])
return output
def _build_ring_scatter(pred_by_s_d, rank_by_s_d,
chunks_by_dev):
"""Construct subgraph for second (scatter) pass of ring all-reduce.
Args:
pred_by_s_d: as produced by _ring_permutations
rank_by_s_d: as produced by _ring_permutations
chunks_by_dev: list of list of T @{tf.Tensor} indexed by ints
(device, chunk)
Raises:
ValueError: chunks_by_dev is not well-formed
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors, one
at each device corresponding to the outer dimension of chunks_by_dev.
"""
num_devices = len(chunks_by_dev)
num_chunks = len(chunks_by_dev[0])
if 0 != num_chunks % num_devices:
raise ValueError(
"Expect number of chunks per device to be divisible by num_devices")
num_subchunks = int(num_chunks / num_devices)
num_ticks = num_devices - 1
for tick in range(0, num_ticks):
passed_values = [None for _ in range(0, num_chunks)]
for d in range(0, num_devices):
with ops.colocate_with(chunks_by_dev[d][0]):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (1 + tick)) % num_devices
pred_dev = pred_by_s_d[s][d]
chunk_index = (seg_index * num_subchunks) + s
passed_values[chunk_index] = array_ops.identity(
chunks_by_dev[pred_dev][chunk_index])
for d in range(0, num_devices):
for s in range(0, num_subchunks):
rank = rank_by_s_d[s][d]
seg_index = (rank + num_devices - (1 + tick)) % num_devices
chunk_index = (seg_index * num_subchunks) + s
chunks_by_dev[d][chunk_index] = passed_values[chunk_index]
# Join chunks at each device.
output = []
for x in chunks_by_dev:
with ops.colocate_with(x[0]):
output.append(array_ops.concat(x, 0))
return output
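# Illustrative sketch (an addition, not part of the original module): the same
# two-phase ring schedule on plain Python lists, with num_subchunks == 1 and
# the identity device permutation (pred of d is d - 1), so the chunk indexing
# used above can be traced by hand. Assumes each per-device list has a length
# divisible by the number of devices.
def _example_ring_all_reduce(values_by_dev):
  n = len(values_by_dev)
  size = len(values_by_dev[0]) // n
  chunks = [[list(v[i * size:(i + 1) * size]) for i in range(n)]
            for v in values_by_dev]
  # Reduction pass: after n - 1 ticks device d holds the fully reduced chunk d.
  for tick in range(n - 1):
    for d in range(n):
      seg = (d - 2 - tick) % n
      pred = (d - 1) % n
      chunks[d][seg] = [a + b
                        for a, b in zip(chunks[pred][seg], chunks[d][seg])]
  # Scatter pass: rotate the fully reduced chunks until every device has all.
  for tick in range(n - 1):
    for d in range(n):
      seg = (d - 1 - tick) % n
      pred = (d - 1) % n
      chunks[d][seg] = list(chunks[pred][seg])
  return [[x for c in dev_chunks for x in c] for dev_chunks in chunks]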
def build_recursive_hd_all_reduce(input_tensors, red_op, un_op=None):
"""Construct a subgraph for recursive halving-doubling all-reduce.
The recursive halving-doubling algorithm is described in
http://www.mcs.anl.gov/~thakur/papers/ijhpca-coll.pdf
The concept is to arrange the participating n devices in
a linear sequence where devices exchange data pairwise
with one other device in each round. During the gather
phase there are lg(n) rounds where devices exchange
increasingly smaller sub-tensors with another device
at increasingly greater distances, until at the top
each device has 1/n of the fully reduced values. During the
scatter phase each device exchanges its fully reduced
sub-tensor (which doubles in length at each round)
with one other device at increasingly smaller distances
until each device has all of the fully reduced values.
Note: this preliminary version requires that len(input_tensors) be a
power of 2. TODO(tucker): relax this restriction. Also, the
number of elements in each tensor must be divisible by 2^h where h
is the number of hops in each phase. This will also be relaxed in
the future with edge-case specific logic.
Args:
input_tensors: list of T @{tf.Tensor} to be elementwise reduced.
red_op: a binary elementwise reduction Op.
un_op: an optional unary elementwise Op to apply to reduced values.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors, one
at each device of input_tensors.
Raises:
ValueError: num_devices not a power of 2, or tensor len not divisible
by 2 the proper number of times.
"""
devices = [t.device for t in input_tensors]
input_tensors, shape = _flatten_tensors(input_tensors)
reduced_shards = _build_recursive_hd_gather(input_tensors, devices, red_op)
if un_op:
reduced_shards = [un_op(t) for t in reduced_shards]
output_tensors = _build_recursive_hd_scatter(reduced_shards, devices)
if len(shape) != 1:
output_tensors = _reshape_tensors(output_tensors, shape)
return output_tensors
def _build_recursive_hd_gather(input_tensors, devices, red_op):
"""Construct the gather phase of recursive halving-doubling all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} to be elementwise reduced.
devices: a list of strings naming the devices hosting input_tensors,
which will also be used to host the (partial) reduction values.
red_op: a binary elementwise reduction Op.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensor shards.
Raises:
ValueError: num_devices not a power of 2, or tensor len not divisible
by 2 the proper number of times.
"""
num_devices = len(devices)
num_hops = int(math.log(num_devices, 2))
if num_devices != (2 ** num_hops):
raise ValueError("num_devices must be a power of 2")
chunks = input_tensors
for h in range(0, num_hops):
span = 2 ** h
group_size = span * 2
new_chunks = [[] for _ in devices]
for d in range(0, num_devices):
if (d % group_size) >= (group_size / 2):
# skip right half of a pair
continue
left_dev = devices[d]
right_dev = devices[d + span]
left_split = array_ops.split(chunks[d], 2)
right_split = array_ops.split(chunks[d+span], 2)
with ops.device(left_dev):
new_chunks[d] = red_op(left_split[0], right_split[0])
with ops.device(right_dev):
new_chunks[d + span] = red_op(left_split[1], right_split[1])
chunks = new_chunks
return chunks
def _build_recursive_hd_scatter(input_tensors, devices):
"""Construct the scatter phase of recursive halving-doublng all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} that are fully-reduced shards.
devices: a list of strings naming the devices on which the reconstituted
full tensors should be placed.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors.
"""
num_devices = len(devices)
num_hops = int(math.log(num_devices, 2))
assert num_devices == (2 ** num_hops), "num_devices must be a power of 2"
chunks = input_tensors
for h in reversed(range(0, num_hops)):
span = 2 ** h
group_size = span * 2
new_chunks = [[] for _ in devices]
for d in range(0, num_devices):
if (d % group_size) >= (group_size / 2):
# skip right half of a pair
continue
left_idx = d
right_idx = d + span
left_dev = devices[left_idx]
right_dev = devices[right_idx]
with ops.device(left_dev):
new_chunks[left_idx] = array_ops.concat([chunks[left_idx],
chunks[right_idx]], 0)
with ops.device(right_dev):
new_chunks[right_idx] = array_ops.concat([chunks[left_idx],
chunks[right_idx]], 0)
chunks = new_chunks
return chunks
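# Illustrative sketch (an addition, not used by the module): recursive
# halving-doubling on plain Python lists, mirroring the pairing and
# half-splitting done by the two functions above. Assumes a power-of-2 number
# of devices and equal-length per-device lists.
def _example_recursive_hd_all_reduce(values_by_dev):
  n = len(values_by_dev)
  hops = n.bit_length() - 1
  chunks = [list(v) for v in values_by_dev]
  # Gather: each hop pairs devices `span` apart; the left one keeps the
  # reduced first half, the right one the reduced second half.
  for h in range(hops):
    span = 2 ** h
    for d in range(n):
      if (d % (2 * span)) >= span:
        continue  # right member of a pair is handled together with the left
      left, right = chunks[d], chunks[d + span]
      half = len(left) // 2
      chunks[d] = [a + b for a, b in zip(left[:half], right[:half])]
      chunks[d + span] = [a + b for a, b in zip(left[half:], right[half:])]
  # Scatter: walk the hops in reverse, concatenating the two shards of each
  # pair so every device ends with the full reduced vector.
  for h in reversed(range(hops)):
    span = 2 ** h
    for d in range(n):
      if (d % (2 * span)) >= span:
        continue
      joined = chunks[d] + chunks[d + span]
      chunks[d] = joined
      chunks[d + span] = list(joined)
  return chunks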
def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None):
"""Construct a subgraph for shuffle all-reduce.
Shuffle reduce is essentially the algorithm implemented when using
parameter servers. Suppose tensor length is n, there are d devices
and g gather shards. Each device sends a n/g length sub-tensor to
each gather shard. The gather shards perform a reduction across d
fragments, then broadcast the result back to each device. The
devices then join the g fully reduced fragments they receive from
the shards. The gather shards could perform d-1 pairwise
reductions, or one d-way reduction. The first is better where
reduction Op time is low compared to transmission time, the second
better in the other case.
Args:
    input_tensors: list of T @{tf.Tensor} values to be reduced.
gather_devices: list of names of devices on which reduction shards
should be placed.
    red_op: an n-ary elementwise reduction Op
un_op: optional elementwise unary Op to be applied to fully-reduced values.
Returns:
list of T @{tf.Tensor} which are the fully reduced tensors.
"""
input_tensors, shape = _flatten_tensors(input_tensors)
dst_devices = [t.device for t in input_tensors]
reduced_shards = _build_shuffle_gather(input_tensors, gather_devices,
red_op, un_op)
output_tensors = _build_shuffle_scatter(reduced_shards, dst_devices)
if len(shape) != 1:
output_tensors = _reshape_tensors(output_tensors, shape)
return output_tensors
def _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op=None):
"""Construct the gather (concentrate and reduce) phase of shuffle all-reduce.
Args:
    input_tensors: list of T @{tf.Tensor} values to be reduced.
gather_devices: list of names of devices on which reduction shards
should be placed.
    red_op: an n-ary elementwise reduction Op applied to the list of shards
un_op: optional elementwise unary Op to be applied to fully-reduced values.
Returns:
list of T @{tf.Tensor} which are the fully reduced shards.
Raises:
ValueError: inputs not well-formed.
"""
num_source_devices = len(input_tensors)
num_gather_devices = len(gather_devices)
shape = input_tensors[0].shape
if len(shape) != 1:
raise ValueError("input_tensors must be 1D")
shards_by_source = []
for d in range(0, num_source_devices):
with ops.colocate_with(input_tensors[d]):
shards_by_source.append(
_ragged_split(input_tensors[d], num_gather_devices))
reduced_shards = []
for d in range(0, num_gather_devices):
with ops.device(gather_devices[d]):
values = [s[d] for s in shards_by_source]
red_shard = red_op(values)
if un_op:
red_shard = un_op(red_shard)
reduced_shards.append(red_shard)
return reduced_shards
def _build_shuffle_scatter(reduced_shards, dst_devices):
"""Build the scatter phase of shuffle all-reduce.
Args:
    reduced_shards: list of T @{tf.Tensor} fully reduced shards
dst_devices: list of names of devices at which the fully-reduced value
should be reconstituted.
Returns:
list of T @{tf.Tensor} scattered tensors.
"""
num_devices = len(dst_devices)
out_tensors = []
for d in range(0, num_devices):
with ops.device(dst_devices[d]):
out_tensors.append(array_ops.concat(reduced_shards, 0))
return out_tensors
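# Illustrative sketch (an addition, not used by the module): the shuffle
# pattern on plain Python lists with g gather shards -- each shard sums its
# slice across devices, then every device concatenates the reduced shards
# (an even split stands in for _ragged_split).
def _example_shuffle_all_reduce(values_by_dev, num_gather_shards):
  n_elems = len(values_by_dev[0])
  bounds = [n_elems * i // num_gather_shards
            for i in range(num_gather_shards + 1)]
  reduced_shards = []
  for s in range(num_gather_shards):
    slices = [v[bounds[s]:bounds[s + 1]] for v in values_by_dev]
    reduced_shards.append([sum(col) for col in zip(*slices)])
  full = [x for shard in reduced_shards for x in shard]
  return [list(full) for _ in values_by_dev]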
def _split_by_task(devices, values):
"""Partition devices and values by common task.
Args:
devices: list of device name strings
    values: list of T @{tf.Tensor} of same length as devices.
Returns:
(per_task_devices, per_task_values) where both values are
lists of lists with isomorphic structure: the outer list is
indexed by task, and the inner list has length of the number
of values belonging to that task. per_task_devices contains
the specific devices to which the values are local, and
per_task_values contains the corresponding values.
Raises:
ValueError: devices must be same length as values.
"""
num_devices = len(devices)
if num_devices != len(values):
raise ValueError("len(devices) must equal len(values)")
per_task_devices = collections.OrderedDict()
per_task_values = collections.OrderedDict()
for d in range(num_devices):
d_spec = device_lib.DeviceSpec.from_string(devices[d])
if not hasattr(d_spec, "task") or d_spec.task is None:
assert False, "failed to parse device %s" % devices[d]
index = (d_spec.job or "localhost", d_spec.replica or 0, d_spec.task)
if index not in per_task_devices:
per_task_devices[index] = []
per_task_values[index] = []
per_task_devices[index].append(devices[d])
per_task_values[index].append(values[d])
return (list(per_task_devices.values()), list(per_task_values.values()))
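# Minimal sketch (an addition; the device strings are hypothetical): the
# grouping performed above, keyed on (job, replica, task), using plain string
# parsing instead of DeviceSpec so the bucketing can be read in isolation.
def _example_split_by_task():
  devices = ["/job:worker/replica:0/task:0/gpu:0",
             "/job:worker/replica:0/task:0/gpu:1",
             "/job:worker/replica:0/task:1/gpu:0"]
  grouped = collections.OrderedDict()
  for dev in devices:
    fields = dict(p.split(":", 1) for p in dev.strip("/").split("/"))
    key = (fields.get("job", "localhost"), fields.get("replica", "0"),
           fields.get("task", "0"))
    grouped.setdefault(key, []).append(dev)
  return list(grouped.values())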
def build_nccl_all_reduce(input_tensors, red_op, un_op=None):
"""Build a subgraph that does one full all-reduce, using NCCL.
Args:
input_tensors: list of T @{tf.Tensor} of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator. Must be one of
{tf.add}
    un_op: optional unary elementwise Op to apply to fully-reduced values.
Returns:
list of T @{tf.Tensor} of reduced values.
Raises:
ValueError: red_op not supported.
"""
if red_op == math_ops.add:
output_tensors = nccl.all_sum(input_tensors)
else:
raise ValueError("red_op not supported by NCCL all-reduce: ", red_op)
if un_op:
un_op_wrapped = []
for t in output_tensors:
with ops.colocate_with(t):
un_op_wrapped.append(un_op(t))
output_tensors = un_op_wrapped
return output_tensors
def _build_nccl_hybrid(input_tensors, red_op, upper_level_f):
"""Construct a subgraph for NCCL hybrid all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator.
upper_level_f: function for reducing one value per worker, across
workers.
Returns:
list of T @{tf.Tensor} of reduced values.
Raises:
ValueError: inputs not well-formed.
"""
input_tensors, shape = _flatten_tensors(input_tensors)
devices = [t.device for t in input_tensors]
per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors)
num_workers = len(per_worker_devices)
up_values = [None for w in range(0, num_workers)]
up_devices = up_values[:]
down_values = up_values[:]
# First stage: reduce within each worker using NCCL
for w in range(0, num_workers):
worker_values = build_nccl_all_reduce(per_worker_values[w], red_op)
# NOTE: these reductions will not run to completion unless
# every output value is used. Since we only need one, we
# need to put control dependencies on the rest.
with ops.control_dependencies(worker_values):
with ops.device(worker_values[0].device):
up_values[w] = array_ops.identity(worker_values[0])
up_devices[w] = per_worker_devices[w][0]
# Second stage: Apply upper_level_f to reduce across first device at
# each worker
level_2_output = upper_level_f(up_values)
# Third stage: propagate within each worker using NCCL Broadcast
for w in range(0, num_workers):
dst_tensors = []
with ops.device(per_worker_devices[w][0]):
broadcast_src = nccl.broadcast(array_ops.identity(level_2_output[w]))
for d in per_worker_devices[w]:
with ops.device(d):
dst_tensors.append(array_ops.identity(broadcast_src))
down_values[w] = dst_tensors
output_tensors = [v for sublist in down_values for v in sublist]
if len(shape) != 1:
output_tensors = _reshape_tensors(output_tensors, shape)
return output_tensors
def _reduce_non_singleton(input_tensors, red_f, un_op):
"""If input_tensors has more than one element apply red_f, else apply un_op."""
if len(input_tensors) > 1:
return red_f(input_tensors)
else:
if not un_op:
return input_tensors
output_tensors = []
for t in input_tensors:
with ops.colocate_with(t):
output_tensors.append(un_op(t))
return output_tensors
def build_nccl_then_ring(input_tensors, subdiv, red_op, un_op=None):
"""Construct hybrid of NCCL within workers, Ring across workers."""
def upper_builder(y):
return build_ring_all_reduce(y, len(y), subdiv, [0], red_op, un_op)
def upper_level_f(x):
return _reduce_non_singleton(x, upper_builder, un_op)
return _build_nccl_hybrid(input_tensors, red_op, upper_level_f)
def build_nccl_then_recursive_hd(input_tensors, red_op, un_op=None):
"""Construct hybrid of NCCL within workers, Recursive-HD across workers."""
upper_level_f = lambda x: build_recursive_hd_all_reduce(x, red_op, un_op)
return _build_nccl_hybrid(input_tensors, red_op, upper_level_f)
def build_nccl_then_shuffle(input_tensors, gather_devices, nccl_red_op,
shuffle_red_op, un_op=None):
"""Construct hybrid of NCCL within workers, Shuffle across workers."""
upper_level_f = lambda x: build_shuffle_all_reduce(x, gather_devices,
shuffle_red_op, un_op)
return _build_nccl_hybrid(input_tensors, nccl_red_op, upper_level_f)
def _build_shuffle_hybrid(input_tensors, gather_devices, red_op, upper_level_f):
"""Construct a subgraph for Shuffle hybrid all-reduce.
Args:
input_tensors: list of T @{tf.Tensor} of same-shape and type values to
be reduced.
gather_devices: list of device names on which to host gather shards.
red_op: binary elementwise reduction operator.
upper_level_f: function for reducing one value per worker, across
workers.
Returns:
list of T @{tf.Tensor} of reduced values.
Raises:
ValueError: inputs not well-formed.
"""
input_tensors, shape = _flatten_tensors(input_tensors)
# First stage, reduce across each worker using gather_devices.
devices = [t.device for t in input_tensors]
per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors)
num_workers = len(per_worker_devices)
up_values = []
if len(gather_devices) != num_workers:
raise ValueError("For shuffle hybrid, gather_devices must contain one "
"device per worker. ")
for w in range(0, num_workers):
reduced_shards = _build_shuffle_gather(
per_worker_values[w], [gather_devices[w]], red_op)
up_values.append(reduced_shards[0])
# Second stage, apply upper_level_f.
level_2_output = upper_level_f(up_values)
# Third stage, apply shuffle scatter at each worker.
output_tensors = []
for w in range(0, num_workers):
output_tensors += _build_shuffle_scatter(
[level_2_output[w]], per_worker_devices[w])
if len(shape) != 1:
output_tensors = _reshape_tensors(output_tensors, shape)
return output_tensors
def build_shuffle_then_ring(input_tensors, gather_devices, subdiv,
red_n_op, red_op, un_op=None):
"""Construct hybrid of Shuffle within workers, Ring across workers."""
def upper_builder(tensors):
return build_ring_all_reduce(tensors, len(tensors), subdiv, [0],
red_op, un_op)
def upper_level_f(tensors):
return _reduce_non_singleton(tensors, upper_builder, un_op)
return _build_shuffle_hybrid(
input_tensors, gather_devices, red_n_op, upper_level_f)
def build_shuffle_then_shuffle(input_tensors, first_gather_devices,
second_gather_devices, red_op, un_op=None):
"""Construct hybrid of Shuffle within workers, Shuffle across workers."""
def upper_builder(tensors):
return build_shuffle_all_reduce(tensors, second_gather_devices,
red_op, un_op)
def upper_level_f(tensors):
return _reduce_non_singleton(tensors, upper_builder, un_op)
return _build_shuffle_hybrid(
input_tensors, first_gather_devices, red_op, upper_level_f)
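# Illustrative sketch (an addition, not used above): the three-stage "hybrid"
# shape shared by the builders in this file, on plain Python lists -- reduce
# within each worker, reduce one representative value per worker across
# workers, then copy the result back to every device of every worker.
def _example_hybrid_all_reduce(values_by_worker):
  per_worker_totals = [sum(devs) for devs in values_by_worker]
  grand_total = sum(per_worker_totals)
  return [[grand_total for _ in devs] for devs in values_by_worker]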
| lukeiwanski/tensorflow | tensorflow/contrib/all_reduce/python/all_reduce.py | Python | apache-2.0 | 32,747 |
#! /usr/bin/env python
import angr
from angrutils import plot_func_graph
def analyze(b, name):
cfg = b.analyses.CFG(normalize=True)
    for func in b.kb.functions.values():
if func.name.find('Java_') == 0:
plot_func_graph(b, func.transition_graph, "%s_%s_cfg" % (name, func.name), asminst=True, vexinst=False)
if __name__ == "__main__":
proj = angr.Project("../samples/1.6.26-libjsound.so", load_options={'auto_load_libs':False, 'main_opts':{'base_addr':0}})
analyze(proj, "libjsound")
| axt/angr-utils | examples/plot_func_graph/plot_func_graph_example.py | Python | bsd-2-clause | 526 |
"""
title : monitoringAgent.py
description : includes
a) register name prefix
b) respond to Interest messages which have matching name prefixes
source :
author : Adisorn Lertsinsrubtavee
date : 19 May 2017
version : 1.0
contributors :
usage :
notes :
compile and run : It is a python module imported by a main python programme.
python_version : Python 2.7.12
====================================================
"""
import argparse
import os
import sys
import threading
import time
import traceback
from pprint import pprint
from pyndn import Data
from pyndn import Exclude
from pyndn import Face
from pyndn import Interest
from pyndn import InterestFilter
from pyndn import Name
from pyndn.security import KeyChain
from modules.tools.enumerate_publisher import EnumeratePublisher
from modules.tools.termopi import termopi # class with dictionary data structure
class Monitoring_Agent_Main(object):
def __init__(self, namePrefix, producerName):
self.configPrefix = Name(namePrefix)
self.outstanding = dict()
self.isDone = False
self.keyChain = KeyChain()
self.face = Face("127.0.0.1")
#self.DataStore = DS.readDataStore_json()
#self.DataStore = DS.table
self.script_path = os.path.abspath(__file__) # i.e. /path/to/dir/foobar.py
self.script_dir = os.path.split(self.script_path)[0] #i.e. /path/to/dir/
self.Datamessage_size = 8000 #8kB --> Max Size from NDN standard
self.producerName = producerName
#print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
#pprint(self.DataStore)
def run(self):
try:
self.face.setCommandSigningInfo(self.keyChain, self.keyChain.getDefaultCertificateName())
self.face.registerPrefix(self.configPrefix, self.onInterest, self.onRegisterFailed)
print "Registered prefix : " + self.configPrefix.toUri()
while not self.isDone:
self.face.processEvents()
time.sleep(0.01)
except RuntimeError as e:
print "ERROR: %s" % e
def onInterest(self, prefix, interest, face, interestFilterId, filter):
interestName = interest.getName()
data = Data(interestName)
#print "++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
#pprint(self.DataStore)
print "Interest Name: %s" %interestName
interest_name_components = interestName.toUri().split("/")
if "monitoring" in interest_name_components:
print "Check Pi and Containers Status"
monitoring_agent = termopi()
## Print monitoring data
#monitoring_agent.prt_pi_resources()
print "Update json file"
filename = "piStatus"+self.producerName+".json"
folder_name = "PIstatus/"
rel_path = os.path.join(self.script_dir, folder_name)
if not os.path.exists(rel_path):
os.makedirs(rel_path)
abs_file_path = os.path.join(rel_path, filename)
monitoring_agent.create_jsonfile_with_pi_status(abs_file_path, self.producerName)
freshness = 10 #milli second, content will be deleted from the cache after freshness period
self.sendingFile(abs_file_path, interest, face, freshness)
else:
print "Interest name mismatch"
def onRegisterFailed(self, prefix):
print "Register failed for prefix", prefix.toUri()
self.isDone = True
def sendingFile(self, file_path, interest, face, freshness):
print "Sending File Function"
interestName = interest.getName()
interestNameSize = interestName.size()
try:
SegmentNum = (interestName.get(interestNameSize - 1)).toSegment()
dataName = interestName.getSubName(0, interestNameSize - 1)
        # If no segment number is included in the Interest, treat it as a request for segment 0 and use the full Interest name as the Data name
except RuntimeError as e:
SegmentNum = 0
dataName = interestName
# Put file to the Data message
try:
            # Due to the overhead of the NDN name and other header fields, NDN header overhead + Data packet content must stay below the max NDN packet size.
            # So the segment size is hard coded to 8000 bytes (self.Datamessage_size).
# Class Enumerate publisher is used to split large files into segments and get a required segment ( segment numbers started from 0)
dataSegment, last_segment_num = EnumeratePublisher(file_path, self.Datamessage_size, SegmentNum).getFileSegment()
# create the DATA name appending the segment number
dataName = dataName.appendSegment(SegmentNum)
data = Data(dataName)
data.setContent(dataSegment)
# set the final block ID to the last segment number
last_segment = (Name.Component()).fromNumber(last_segment_num)
data.getMetaInfo().setFinalBlockId(last_segment)
#hourMilliseconds = 600 * 1000
data.getMetaInfo().setFreshnessPeriod(freshness)
# currently Data is signed from the Default Identitiy certificate
self.keyChain.sign(data, self.keyChain.getDefaultCertificateName())
# Sending Data message
face.send(data.wireEncode().toBuffer())
print "Replied to Interest name: %s" % interestName.toUri()
print "Replied with Data name: %s" % dataName.toUri()
except ValueError as err:
print "ERROR: %s" % err
| AdL1398/PiCasso | source/modules/Monitoring/monitoringAgent.py | Python | mit | 5,667 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import shutil
import tempfile
import git
def make_test_repo(name='testrepo'):
def decorator(test):
@functools.wraps(test)
def wrapper(*args, **kwargs):
try:
testrepo = tempfile.mkdtemp()
kwargs[name] = testrepo
repo = git.Repo.init(testrepo)
tf_path = os.path.join(testrepo, 'testfile.txt')
with open(tf_path, 'w') as tf:
tf.write('test content')
repo.index.add([tf_path])
repo.index.commit('test commit')
return test(*args, **kwargs)
finally:
shutil.rmtree(testrepo)
return wrapper
return decorator
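# Hypothetical usage sketch (an addition, not part of the original module): the
# decorator injects the path of a throwaway git repo -- holding one committed
# file -- into the wrapped callable under the keyword named by `name`, and
# removes the directory again after the call returns.
@make_test_repo(name='testrepo')
def _example_test(testrepo=None):
    return os.path.isdir(os.path.join(testrepo, '.git'))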
| j2sol/giftwrap | giftwrap/tests/utils.py | Python | apache-2.0 | 1,369 |
from oscar.apps.basket.abstract_models import AbstractBasket
from oscar.core.loading import get_class
OrderNumberGenerator = get_class('order.utils', 'OrderNumberGenerator')
class Basket(AbstractBasket):
@property
def order_number(self):
return OrderNumberGenerator().order_number(self)
from oscar.apps.basket.models import * # noqa pylint: disable=wildcard-import,unused-wildcard-import
| janusnic/ecommerce | ecommerce/extensions/basket/models.py | Python | agpl-3.0 | 411 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Create a Normalize Function for PEP 426 names.
Revision ID: 20f4dbe11e9
Revises: 111d8fc0443
Create Date: 2015-04-04 23:29:58.373217
"""
from alembic import op
revision = "20f4dbe11e9"
down_revision = "111d8fc0443"
def upgrade():
op.execute("""
CREATE FUNCTION normalize_pep426_name(text) RETURNS text AS $$
        SELECT lower(regexp_replace($1, '(\.|_)', '-', 'ig'))
$$
LANGUAGE SQL
IMMUTABLE
RETURNS NULL ON NULL INPUT;
""")
def downgrade():
op.execute("DROP FUNCTION normalize_pep426_name(text)")
| HonzaKral/warehouse | warehouse/migrations/versions/20f4dbe11e9_normalize_function.py | Python | apache-2.0 | 1,393 |
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import traceback
import os
import time
import urllib
import re
import datetime
import codecs
import sickbeard
from sickbeard import config, sab
from sickbeard import clients
from sickbeard import history, notifiers, processTV
from sickbeard import ui
from sickbeard import logger, helpers, exceptions, classes, db
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import naming
from sickbeard import scene_exceptions
from sickbeard import subtitles
from sickbeard import network_timezones
from sickbeard import sbdatetime
from sickbeard.providers import newznab, rsstorrent
from sickbeard.common import Quality, Overview, statusStrings, qualityPresetStrings, cpu_presets
from sickbeard.common import SNATCHED, UNAIRED, IGNORED, ARCHIVED, WANTED, FAILED, SKIPPED
from sickbeard.common import SD, HD720p, HD1080p
from sickbeard.exceptions import ex
from sickbeard.blackandwhitelist import BlackAndWhiteList
from sickbeard.scene_exceptions import get_scene_exceptions
from sickbeard.browser import foldersAtPath
from sickbeard.scene_numbering import get_scene_numbering, set_scene_numbering, get_scene_numbering_for_show, \
get_xem_numbering_for_show, get_scene_absolute_numbering_for_show, get_xem_absolute_numbering_for_show, \
get_scene_absolute_numbering
from lib.dateutil import tz, parser as dateutil_parser
from lib.unrar2 import RarFile
from lib import adba, subliminal
from lib.trakt import TraktAPI
from lib.trakt.exceptions import traktException, traktAuthException, traktServerBusy
from versionChecker import CheckVersion
try:
import json
except ImportError:
from lib import simplejson as json
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from Cheetah.Template import Template as CheetahTemplate
from Cheetah.Filters import Filter as CheetahFilter
from tornado.routes import route
from tornado.web import RequestHandler, HTTPError, authenticated, asynchronous
from tornado.gen import coroutine
from tornado.ioloop import IOLoop
from tornado.concurrent import run_on_executor
from concurrent.futures import ThreadPoolExecutor
route_locks = {}
class html_entities(CheetahFilter):
def filter(self, val, **dummy_kw):
if isinstance(val, unicode):
filtered = val.encode('ascii', 'xmlcharrefreplace')
elif val is None:
filtered = ''
elif isinstance(val, str):
try:
filtered = val.decode(sickbeard.SYS_ENCODING).encode('ascii', 'xmlcharrefreplace')
except UnicodeDecodeError as e:
logger.log(u'Unable to decode using {0}, trying utf-8. Error is: {1}'.format(sickbeard.SYS_ENCODING, ex(e)),logger.DEBUG)
try:
filtered = val.decode('utf-8').encode('ascii', 'xmlcharrefreplace')
except UnicodeDecodeError as e:
logger.log(u'Unable to decode using utf-8, Error is {0}.'.format(ex(e)),logger.ERROR)
else:
filtered = self.filter(str(val))
return filtered
class PageTemplate(CheetahTemplate):
def __init__(self, rh, *args, **kwargs):
kwargs['file'] = os.path.join(sickbeard.PROG_DIR, "gui/" + sickbeard.GUI_NAME + "/interfaces/default/", kwargs['file'])
kwargs['filter'] = html_entities
super(PageTemplate, self).__init__(*args, **kwargs)
self.sbRoot = sickbeard.WEB_ROOT
self.sbHttpPort = sickbeard.WEB_PORT
self.sbHttpsPort = sickbeard.WEB_PORT
self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS
self.sbHandleReverseProxy = sickbeard.HANDLE_REVERSE_PROXY
self.sbThemeName = sickbeard.THEME_NAME
self.sbLogin = rh.get_current_user()
if rh.request.headers['Host'][0] == '[':
self.sbHost = re.match("^\[.*\]", rh.request.headers['Host'], re.X | re.M | re.S).group(0)
else:
self.sbHost = re.match("^[^:]+", rh.request.headers['Host'], re.X | re.M | re.S).group(0)
if "X-Forwarded-Host" in rh.request.headers:
self.sbHost = rh.request.headers['X-Forwarded-Host']
if "X-Forwarded-Port" in rh.request.headers:
sbHttpPort = rh.request.headers['X-Forwarded-Port']
self.sbHttpsPort = sbHttpPort
if "X-Forwarded-Proto" in rh.request.headers:
self.sbHttpsEnabled = True if rh.request.headers['X-Forwarded-Proto'] == 'https' else False
logPageTitle = 'Logs & Errors'
if len(classes.ErrorViewer.errors):
logPageTitle += ' (' + str(len(classes.ErrorViewer.errors)) + ')'
self.logPageTitle = logPageTitle
self.sbPID = str(sickbeard.PID)
self.menu = [
{'title': 'Home', 'key': 'home'},
{'title': 'Coming Episodes', 'key': 'comingEpisodes'},
{'title': 'History', 'key': 'history'},
{'title': 'Manage', 'key': 'manage'},
{'title': 'Config', 'key': 'config'},
{'title': logPageTitle, 'key': 'errorlogs'},
]
def compile(self, *args, **kwargs):
if not os.path.exists(os.path.join(sickbeard.CACHE_DIR, 'cheetah')):
os.mkdir(os.path.join(sickbeard.CACHE_DIR, 'cheetah'))
kwargs['cacheModuleFilesForTracebacks'] = True
kwargs['cacheDirForModuleFiles'] = os.path.join(sickbeard.CACHE_DIR, 'cheetah')
return super(PageTemplate, self).compile(*args, **kwargs)
class BaseHandler(RequestHandler):
def __init__(self, *args, **kwargs):
super(BaseHandler, self).__init__(*args, **kwargs)
def set_default_headers(self):
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def write_error(self, status_code, **kwargs):
# handle 404 http errors
if status_code == 404:
url = self.request.uri
if sickbeard.WEB_ROOT and self.request.uri.startswith(sickbeard.WEB_ROOT):
url = url[len(sickbeard.WEB_ROOT) + 1:]
if url[:3] != 'api':
return self.redirect('/')
else:
self.finish('Wrong API key used')
elif self.settings.get("debug") and "exc_info" in kwargs:
exc_info = kwargs["exc_info"]
trace_info = ''.join(["%s<br/>" % line for line in traceback.format_exception(*exc_info)])
request_info = ''.join(["<strong>%s</strong>: %s<br/>" % (k, self.request.__dict__[k] ) for k in
self.request.__dict__.keys()])
error = exc_info[1]
self.set_header('Content-Type', 'text/html')
self.finish("""<html>
<title>%s</title>
<body>
<h2>Error</h2>
<p>%s</p>
<h2>Traceback</h2>
<p>%s</p>
<h2>Request Info</h2>
<p>%s</p>
<button onclick="window.location='%s/errorlogs/';">View Log(Errors)</button>
</body>
</html>""" % (error, error, trace_info, request_info, sickbeard.WEB_ROOT))
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
----->>>>> NOTE: Removed self.finish <<<<<-----
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
import urlparse
from tornado.escape import utf8
if not url.startswith(sickbeard.WEB_ROOT):
url = sickbeard.WEB_ROOT + url
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, int) and 300 <= status <= 399
self.set_status(status)
self.set_header("Location", urlparse.urljoin(utf8(self.request.uri),
utf8(url)))
def get_current_user(self, *args, **kwargs):
if not isinstance(self, UI) and sickbeard.WEB_USERNAME and sickbeard.WEB_PASSWORD:
return self.get_secure_cookie('sickrage_user')
else:
return True
class WebHandler(BaseHandler):
def __init__(self, *args, **kwargs):
super(WebHandler, self).__init__(*args, **kwargs)
self.io_loop = IOLoop.current()
executor = ThreadPoolExecutor(50)
@authenticated
@coroutine
def get(self, route, *args, **kwargs):
try:
# route -> method obj
route = route.strip('/').replace('.', '_') or 'index'
method = getattr(self, route)
results = yield self.async_call(method)
self.finish(results)
except:
logger.log('Failed doing webui request "%s": %s' % (route, traceback.format_exc()), logger.DEBUG)
raise HTTPError(404)
@run_on_executor
def async_call(self, function):
try:
kwargs = self.request.arguments
for arg, value in kwargs.items():
if len(value) == 1:
kwargs[arg] = value[0]
result = function(**kwargs)
return result
except:
logger.log('Failed doing webui callback: %s' % (traceback.format_exc()), logger.ERROR)
raise
# post uses get method
post = get
class LoginHandler(BaseHandler):
def get(self, *args, **kwargs):
if self.get_current_user():
self.redirect('/home/')
else:
t = PageTemplate(rh=self, file="login.tmpl")
self.finish(t.respond())
def post(self, *args, **kwargs):
api_key = None
username = sickbeard.WEB_USERNAME
password = sickbeard.WEB_PASSWORD
if (self.get_argument('username') == username or not username) \
and (self.get_argument('password') == password or not password):
api_key = sickbeard.API_KEY
if api_key:
remember_me = int(self.get_argument('remember_me', default=0) or 0)
self.set_secure_cookie('sickrage_user', api_key, expires_days=30 if remember_me > 0 else None)
self.redirect('/home/')
class LogoutHandler(BaseHandler):
def get(self, *args, **kwargs):
self.clear_cookie("sickrage_user")
self.redirect('/login/')
class KeyHandler(RequestHandler):
def __init__(self, *args, **kwargs):
super(KeyHandler, self).__init__(*args, **kwargs)
def get(self, *args, **kwargs):
api_key = None
try:
username = sickbeard.WEB_USERNAME
password = sickbeard.WEB_PASSWORD
if (self.get_argument('u', None) == username or not username) and \
(self.get_argument('p', None) == password or not password):
api_key = sickbeard.API_KEY
self.finish({'success': api_key is not None, 'api_key': api_key})
except:
logger.log('Failed doing key request: %s' % (traceback.format_exc()), logger.ERROR)
self.finish({'success': False, 'error': 'Failed returning results'})
@route('(.*)(/?)')
class WebRoot(WebHandler):
def __init__(self, *args, **kwargs):
super(WebRoot, self).__init__(*args, **kwargs)
def index(self):
return self.redirect('/home/')
def robots_txt(self):
""" Keep web crawlers out """
self.set_header('Content-Type', 'text/plain')
return "User-agent: *\nDisallow: /"
def apibuilder(self):
t = PageTemplate(rh=self, file="apiBuilder.tmpl")
def titler(x):
return (helpers.remove_article(x), x)[not x or sickbeard.SORT_ARTICLE]
t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))
myDB = db.DBConnection(row_type="dict")
seasonSQLResults = {}
episodeSQLResults = {}
for curShow in t.sortedShowList:
seasonSQLResults[curShow.indexerid] = myDB.select(
"SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC", [curShow.indexerid])
for curShow in t.sortedShowList:
episodeSQLResults[curShow.indexerid] = myDB.select(
"SELECT DISTINCT season,episode FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[curShow.indexerid])
t.seasonSQLResults = seasonSQLResults
t.episodeSQLResults = episodeSQLResults
if len(sickbeard.API_KEY) == 32:
t.apikey = sickbeard.API_KEY
else:
t.apikey = "api key not generated"
return t.respond()
def showPoster(self, show=None, which=None):
# Redirect initial poster/banner thumb to default images
if which[0:6] == 'poster':
default_image_name = 'poster.png'
else:
default_image_name = 'banner.png'
# image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'gui', 'slick', 'images', default_image_name)
static_image_path = os.path.join('/images', default_image_name)
if show and sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)):
cache_obj = image_cache.ImageCache()
image_file_name = None
if which == 'poster':
image_file_name = cache_obj.poster_path(show)
if which == 'poster_thumb' or which == 'small':
image_file_name = cache_obj.poster_thumb_path(show)
if which == 'banner':
image_file_name = cache_obj.banner_path(show)
if which == 'banner_thumb':
image_file_name = cache_obj.banner_thumb_path(show)
if ek.ek(os.path.isfile, image_file_name):
static_image_path = os.path.normpath(image_file_name.replace(sickbeard.CACHE_DIR, '/cache'))
static_image_path = static_image_path.replace('\\', '/')
return self.redirect(static_image_path)
def setHomeLayout(self, layout):
if layout not in ('poster', 'small', 'banner', 'simple'):
layout = 'poster'
sickbeard.HOME_LAYOUT = layout
return self.redirect("/home/")
def setPosterSortBy(self, sort):
if sort not in ('name', 'date', 'network', 'progress'):
sort = 'name'
sickbeard.POSTER_SORTBY = sort
sickbeard.save_config()
def setPosterSortDir(self, direction):
sickbeard.POSTER_SORTDIR = int(direction)
sickbeard.save_config()
def setHistoryLayout(self, layout):
if layout not in ('compact', 'detailed'):
layout = 'detailed'
sickbeard.HISTORY_LAYOUT = layout
return self.redirect("/history/")
def toggleDisplayShowSpecials(self, show):
sickbeard.DISPLAY_SHOW_SPECIALS = not sickbeard.DISPLAY_SHOW_SPECIALS
return self.redirect("/home/displayShow?show=" + show)
def setComingEpsLayout(self, layout):
if layout not in ('poster', 'banner', 'list', 'calendar'):
layout = 'banner'
if layout == 'calendar':
sickbeard.COMING_EPS_SORT = 'date'
sickbeard.COMING_EPS_LAYOUT = layout
return self.redirect("/comingEpisodes/")
def toggleComingEpsDisplayPaused(self):
sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED
return self.redirect("/comingEpisodes/")
def setComingEpsSort(self, sort):
if sort not in ('date', 'network', 'show'):
sort = 'date'
if sickbeard.COMING_EPS_LAYOUT == 'calendar':
            sort = 'date'
sickbeard.COMING_EPS_SORT = sort
return self.redirect("/comingEpisodes/")
def comingEpisodes(self, layout="None"):
today1 = datetime.date.today()
today = today1.toordinal()
next_week1 = (datetime.date.today() + datetime.timedelta(days=7))
next_week = next_week1.toordinal()
recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
done_show_list = []
qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
myDB = db.DBConnection()
sql_results = myDB.select(
"SELECT *, tv_shows.status AS show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.indexer_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(
['?'] * len(qualList)) + ")", [today, next_week] + qualList)
for cur_result in sql_results:
done_show_list.append(int(cur_result["showid"]))
more_sql_results = myDB.select(
"SELECT *, tv_shows.status AS show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN (" + ','.join(
['?'] * len(
done_show_list)) + ") AND tv_shows.indexer_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.season != 0 AND inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join(
['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")",
done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
sql_results += more_sql_results
more_sql_results = myDB.select(
"SELECT *, tv_shows.status AS show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.indexer_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(
['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList)
sql_results += more_sql_results
# sort by localtime
sorts = {
'date': (lambda x, y: cmp(x["localtime"], y["localtime"])),
'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))),
'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))),
}
# make a dict out of the sql results
sql_results = [dict(row) for row in sql_results]
# add localtime to the dict
for index, item in enumerate(sql_results):
sql_results[index]['localtime'] = sbdatetime.sbdatetime.convert_to_setting(
network_timezones.parse_date_time(item['airdate'],
item['airs'], item['network']))
sql_results.sort(sorts[sickbeard.COMING_EPS_SORT])
t = PageTemplate(rh=self, file="comingEpisodes.tmpl")
# paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' }
# paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused'
paused_item = {'title': 'View Paused:', 'path': {'': ''}}
paused_item['path'] = {'Hide': 'toggleComingEpsDisplayPaused'} if sickbeard.COMING_EPS_DISPLAY_PAUSED else {
'Show': 'toggleComingEpsDisplayPaused'}
t.submenu = [
{'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date',
'Show': 'setComingEpsSort/?sort=show',
'Network': 'setComingEpsSort/?sort=network',
}},
{'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner',
'Poster': 'setComingEpsLayout/?layout=poster',
'List': 'setComingEpsLayout/?layout=list',
'Calendar': 'setComingEpsLayout/?layout=calendar',
}},
paused_item,
]
t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=network_timezones.sb_timezone))
t.today = datetime.datetime.now().replace(tzinfo=network_timezones.sb_timezone)
t.sql_results = sql_results
# Allow local overriding of layout parameter
if layout and layout in ('poster', 'banner', 'list', 'calendar'):
t.layout = layout
else:
t.layout = sickbeard.COMING_EPS_LAYOUT
return t.respond()
class CalendarHandler(BaseHandler):
def get(self, *args, **kwargs):
if sickbeard.CALENDAR_UNPROTECTED:
self.write(self.calendar())
else:
self.calendar_auth()
@authenticated
def calendar_auth(self):
self.write(self.calendar())
# Raw iCalendar implementation by Pedro Jose Pereira Vieito (@pvieito).
#
    # iCalendar (iCal) - Standard RFC 5545 <http://tools.ietf.org/html/rfc5545>
# Works with iCloud, Google Calendar and Outlook.
def calendar(self):
""" Provides a subscribeable URL for iCal subscriptions
"""
logger.log(u"Receiving iCal request from %s" % self.request.remote_ip)
# Create a iCal string
ical = 'BEGIN:VCALENDAR\r\n'
ical += 'VERSION:2.0\r\n'
ical += 'X-WR-CALNAME:SickRage\r\n'
ical += 'X-WR-CALDESC:SickRage\r\n'
ical += 'PRODID://Sick-Beard Upcoming Episodes//\r\n'
# Limit dates
past_date = (datetime.date.today() + datetime.timedelta(weeks=-52)).toordinal()
future_date = (datetime.date.today() + datetime.timedelta(weeks=52)).toordinal()
# Get all the shows that are not paused and are currently on air (from kjoconnor Fork)
myDB = db.DBConnection()
calendar_shows = myDB.select(
"SELECT show_name, indexer_id, network, airs, runtime FROM tv_shows WHERE ( status = 'Continuing' OR status = 'Returning Series' ) AND paused != '1'")
for show in calendar_shows:
# Get all episodes of this show airing between today and next month
episode_list = myDB.select(
"SELECT indexerid, name, season, episode, description, airdate FROM tv_episodes WHERE airdate >= ? AND airdate < ? AND showid = ?",
(past_date, future_date, int(show["indexer_id"])))
utc = tz.gettz('GMT')
for episode in episode_list:
air_date_time = network_timezones.parse_date_time(episode['airdate'], show["airs"],
show['network']).astimezone(utc)
air_date_time_end = air_date_time + datetime.timedelta(
minutes=helpers.tryInt(show["runtime"], 60))
# Create event for episode
ical = ical + 'BEGIN:VEVENT\r\n'
ical = ical + 'DTSTART:' + air_date_time.strftime("%Y%m%d") + 'T' + air_date_time.strftime(
"%H%M%S") + 'Z\r\n'
ical = ical + 'DTEND:' + air_date_time_end.strftime(
"%Y%m%d") + 'T' + air_date_time_end.strftime(
"%H%M%S") + 'Z\r\n'
ical = ical + 'SUMMARY:' + show['show_name'] + ' - ' + str(
episode['season']) + "x" + str(episode['episode']) + " - " + episode['name'] + '\r\n'
ical = ical + 'UID:Sick-Beard-' + str(datetime.date.today().isoformat()) + '-' + show[
'show_name'].replace(" ", "-") + '-E' + str(episode['episode']) + 'S' + str(
episode['season']) + '\r\n'
if episode['description']:
ical = ical + 'DESCRIPTION: {0} on {1} \\n\\n {2}\r\n'.format(
(show['airs'] or '(Unknown airs)'),
(show['network'] or 'Unknown network'),
episode['description'].splitlines()[0])
else:
ical = ical + 'DESCRIPTION:' + (show['airs'] or '(Unknown airs)') + ' on ' + (
show['network'] or 'Unknown network') + '\r\n'
ical = ical + 'END:VEVENT\r\n'
# Ending the iCal
ical += 'END:VCALENDAR'
return ical
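# Illustrative sketch (an addition, not used by the handler above): one VEVENT
# block in the shape the loop in calendar() emits -- CRLF line endings, UTC
# timestamps suffixed with 'Z', and a "<show> - <season>x<episode> - <title>"
# summary.
def _example_vevent(show_name, season, episode, title, start_utc, end_utc):
    fmt = '%Y%m%dT%H%M%SZ'
    lines = ['BEGIN:VEVENT',
             'DTSTART:' + start_utc.strftime(fmt),
             'DTEND:' + end_utc.strftime(fmt),
             'SUMMARY:%s - %sx%s - %s' % (show_name, season, episode, title),
             'END:VEVENT']
    return '\r\n'.join(lines) + '\r\n'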
@route('/ui(/?.*)')
class UI(WebRoot):
def __init__(self, *args, **kwargs):
super(UI, self).__init__(*args, **kwargs)
def add_message(self):
ui.notifications.message('Test 1', 'This is test number 1')
ui.notifications.error('Test 2', 'This is test number 2')
return "ok"
def get_messages(self):
messages = {}
cur_notification_num = 1
for cur_notification in ui.notifications.get_notifications(self.request.remote_ip):
messages['notification-' + str(cur_notification_num)] = {'title': cur_notification.title,
'message': cur_notification.message,
'type': cur_notification.type}
cur_notification_num += 1
return json.dumps(messages)
@route('/browser(/?.*)')
class WebFileBrowser(WebRoot):
def __init__(self, *args, **kwargs):
super(WebFileBrowser, self).__init__(*args, **kwargs)
def index(self, path='', includeFiles=False, *args, **kwargs):
self.set_header("Content-Type", "application/json")
return json.dumps(foldersAtPath(path, True, bool(int(includeFiles))))
def complete(self, term, includeFiles=0, *args, **kwargs):
self.set_header("Content-Type", "application/json")
paths = [entry['path'] for entry in foldersAtPath(os.path.dirname(term), includeFiles=bool(int(includeFiles)))
if 'path' in entry]
return json.dumps(paths)
@route('/home(/?.*)')
class Home(WebRoot):
def __init__(self, *args, **kwargs):
super(Home, self).__init__(*args, **kwargs)
def HomeMenu(self):
menu = [
{'title': 'Add Shows', 'path': 'home/addShows/', },
{'title': 'Manual Post-Processing', 'path': 'home/postprocess/'},
{'title': 'Update KODI', 'path': 'home/updateKODI/', 'requires': self.haveKODI},
{'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': self.havePLEX},
{'title': 'Manage Torrents', 'path': 'manage/manageTorrents/', 'requires': self.haveTORRENT},
]
return menu
def _genericMessage(self, subject, message):
t = PageTemplate(rh=self, file="genericMessage.tmpl")
t.submenu = self.HomeMenu()
t.subject = subject
t.message = message
return t.respond()
def _getEpisode(self, show, season=None, episode=None, absolute=None):
if show is None:
return "Invalid show parameters"
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return "Invalid show paramaters"
if absolute:
epObj = showObj.getEpisode(absolute_number=int(absolute))
elif season and episode:
epObj = showObj.getEpisode(int(season), int(episode))
else:
return "Invalid paramaters"
if epObj is None:
return "Episode couldn't be retrieved"
return epObj
def index(self):
t = PageTemplate(rh=self, file="home.tmpl")
if sickbeard.ANIME_SPLIT_HOME:
shows = []
anime = []
for show in sickbeard.showList:
if show.is_anime:
anime.append(show)
else:
shows.append(show)
t.showlists = [["Shows", shows],
["Anime", anime]]
else:
t.showlists = [["Shows", sickbeard.showList]]
t.submenu = self.HomeMenu()
return t.respond()
def is_alive(self, *args, **kwargs):
if 'callback' in kwargs and '_' in kwargs:
callback, _ = kwargs['callback'], kwargs['_']
else:
return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query string."
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
self.set_header('Content-Type', 'text/javascript')
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
if sickbeard.started:
return callback + '(' + json.dumps(
{"msg": str(sickbeard.PID)}) + ');'
else:
return callback + '(' + json.dumps({"msg": "nope"}) + ');'
def haveKODI(self):
return sickbeard.USE_KODI and sickbeard.KODI_UPDATE_LIBRARY
def havePLEX(self):
return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY
def haveTORRENT(self):
if sickbeard.USE_TORRENTS and sickbeard.TORRENT_METHOD != 'blackhole' \
and (sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'https'
or not sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'http:'):
return True
else:
return False
def testSABnzbd(self, host=None, username=None, password=None, apikey=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_url(host)
connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey)
if connection:
authed, authMsg = sab.testAuthentication(host, username, password, apikey) # @UnusedVariable
if authed:
return "Success. Connected and authenticated"
else:
return "Authentication failed. SABnzbd expects '" + accesMsg + "' as authentication method"
else:
return "Unable to connect to host"
def testTorrent(self, torrent_method=None, host=None, username=None, password=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_url(host)
client = clients.getClientIstance(torrent_method)
connection, accesMsg = client(host, username, password).testAuthentication()
return accesMsg
def testFreeMobile(self, freemobile_id=None, freemobile_apikey=None):
result, message = notifiers.freemobile_notifier.test_notify(freemobile_id, freemobile_apikey)
if result:
return "SMS sent successfully"
else:
return "Problem sending SMS: " + message
def testGrowl(self, host=None, password=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_host(host, default_port=23053)
result = notifiers.growl_notifier.test_notify(host, password)
if password is None or password == '':
pw_append = ''
else:
pw_append = " with password: " + password
if result:
return "Registered and Tested growl successfully " + urllib.unquote_plus(host) + pw_append
else:
return "Registration and Testing of growl failed " + urllib.unquote_plus(host) + pw_append
def testProwl(self, prowl_api=None, prowl_priority=0):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority)
if result:
return "Test prowl notice sent successfully"
else:
return "Test prowl notice failed"
def testBoxcar(self, username=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.boxcar_notifier.test_notify(username)
if result:
return "Boxcar notification succeeded. Check your Boxcar clients to make sure it worked"
else:
return "Error sending Boxcar notification"
def testBoxcar2(self, accesstoken=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.boxcar2_notifier.test_notify(accesstoken)
if result:
return "Boxcar2 notification succeeded. Check your Boxcar2 clients to make sure it worked"
else:
return "Error sending Boxcar2 notification"
def testPushover(self, userKey=None, apiKey=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.pushover_notifier.test_notify(userKey, apiKey)
if result:
return "Pushover notification succeeded. Check your Pushover clients to make sure it worked"
else:
return "Error sending Pushover notification"
def twitterStep1(self):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
return notifiers.twitter_notifier._get_authorization()
def twitterStep2(self, key):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.twitter_notifier._get_credentials(key)
logger.log(u"result: " + str(result))
if result:
return "Key verification successful"
else:
return "Unable to verify key"
def testTwitter(self):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.twitter_notifier.test_notify()
if result:
return "Tweet successful, check your twitter to make sure it worked"
else:
return "Error sending tweet"
def testKODI(self, host=None, username=None, password=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_hosts(host)
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.kodi_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test KODI notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test KODI notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
def testPLEX(self, host=None, username=None, password=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
def testLibnotify(self):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
if notifiers.libnotify_notifier.test_notify():
return "Tried sending desktop notification via libnotify"
else:
return notifiers.libnotify.diagnose()
def testNMJ(self, host=None, database=None, mount=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_host(host)
result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount)
if result:
return "Successfully started the scan update"
else:
return "Test failed to start the scan update"
def settingsNMJ(self, host=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_host(host)
result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host))
if result:
return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % {
"host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT}
else:
return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. (see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}'
def testNMJv2(self, host=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_host(host)
result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host))
if result:
return "Test notice sent successfully to " + urllib.unquote_plus(host)
else:
return "Test notice failed to " + urllib.unquote_plus(host)
def settingsNMJv2(self, host=None, dbloc=None, instance=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_host(host)
result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host), dbloc, instance)
if result:
return '{"message": "NMJ Database found at: %(host)s", "database": "%(database)s"}' % {"host": host,
"database": sickbeard.NMJv2_DATABASE}
else:
return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {
"dbloc": dbloc}
def testTrakt(self, username=None, password=None, disable_ssl=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
if disable_ssl == 'true':
disable_ssl = True
else:
disable_ssl = False
return notifiers.trakt_notifier.test_notify(username, password, disable_ssl)
def loadShowNotifyLists(self):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
myDB = db.DBConnection()
rows = myDB.select("SELECT show_id, show_name, notify_list FROM tv_shows ORDER BY show_name ASC")
data = {}
size = 0
for r in rows:
data[r['show_id']] = {'id': r['show_id'], 'name': r['show_name'], 'list': r['notify_list']}
size += 1
data['_size'] = size
return json.dumps(data)
def saveShowNotifyList(self, show=None, emails=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
myDB = db.DBConnection()
if myDB.action("UPDATE tv_shows SET notify_list = ? WHERE show_id = ?", [emails, show]):
return 'OK'
else:
return 'ERROR: %s' % myDB.last_err
def testEmail(self, host=None, port=None, smtp_from=None, use_tls=None, user=None, pwd=None, to=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
host = config.clean_host(host)
if notifiers.email_notifier.test_notify(host, port, smtp_from, use_tls, user, pwd, to):
return 'Test email sent successfully! Check inbox.'
else:
return 'ERROR: %s' % notifiers.email_notifier.last_err
def testNMA(self, nma_api=None, nma_priority=0):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.nma_notifier.test_notify(nma_api, nma_priority)
if result:
return "Test NMA notice sent successfully"
else:
return "Test NMA notice failed"
def testPushalot(self, authorizationToken=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.pushalot_notifier.test_notify(authorizationToken)
if result:
return "Pushalot notification succeeded. Check your Pushalot clients to make sure it worked"
else:
return "Error sending Pushalot notification"
def testPushbullet(self, api=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.pushbullet_notifier.test_notify(api)
if result:
return "Pushbullet notification succeeded. Check your device to make sure it worked"
else:
return "Error sending Pushbullet notification"
def getPushbulletDevices(self, api=None):
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store')
result = notifiers.pushbullet_notifier.get_devices(api)
if result:
return result
else:
return "Error sending Pushbullet notification"
def shutdown(self, pid=None):
if str(pid) != str(sickbeard.PID):
return self.redirect("/home/")
sickbeard.events.put(sickbeard.events.SystemEvent.SHUTDOWN)
title = "Shutting down"
message = "SickRage is shutting down..."
return self._genericMessage(title, message)
def restart(self, pid=None):
if str(pid) != str(sickbeard.PID):
return self.redirect("/home/")
t = PageTemplate(rh=self, file="restart.tmpl")
t.submenu = self.HomeMenu()
# restart
sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
return t.respond()
def updateCheck(self, pid=None):
if str(pid) != str(sickbeard.PID):
return self.redirect('/home/')
sickbeard.versionCheckScheduler.action.check_for_new_version(force=True)
return self.redirect('/home/')
def update(self, pid=None):
if str(pid) != str(sickbeard.PID):
return self.redirect('/home/')
checkversion = CheckVersion()
backup = checkversion._runbackup()
if backup == True:
if sickbeard.versionCheckScheduler.action.update():
# do a hard restart
sickbeard.events.put(sickbeard.events.SystemEvent.RESTART)
t = PageTemplate(rh=self, file="restart.tmpl")
return t.respond()
else:
return self._genericMessage("Update Failed",
"Update wasn't successful, not restarting. Check your log for more information.")
else:
return self.redirect('/home/')
def branchCheckout(self, branch):
if sickbeard.BRANCH != branch:
sickbeard.BRANCH = branch
ui.notifications.message('Checking out branch: ', branch)
return self.update(sickbeard.PID)
else:
ui.notifications.message('Already on branch: ', branch)
return self.redirect('/home')
def getDBcompare(self, branchDest=None):
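# Compare the database version of the requested checkout branch against the local DB and report 'upgrade', 'equal' or 'downgrade' as JSON.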
checkversion = CheckVersion()
db_status = checkversion.getDBcompare(branchDest)
if db_status == 'upgrade':
logger.log(u"Checkout branch has a new DB version - Upgrade", logger.DEBUG)
return json.dumps({ "status": "success", 'message': 'upgrade' })
elif db_status == 'equal':
logger.log(u"Checkout branch has the same DB version - Equal", logger.DEBUG)
return json.dumps({ "status": "success", 'message': 'equal' })
elif db_status == 'downgrade':
logger.log(u"Checkout branch has an old DB version - Downgrade", logger.DEBUG)
return json.dumps({ "status": "success", 'message': 'downgrade' })
else:
logger.log(u"Checkout branch couldn't compare DB version.", logger.ERROR)
return json.dumps({ "status": "error", 'message': 'General exception' })
def displayShow(self, show=None):
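# Render the displayShow page for one show: episode list, per-category episode counts, submenu actions and scene/XEM numbering.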
if show is None:
return self._genericMessage("Error", "Invalid show ID")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return self._genericMessage("Error", "Show not in show list")
myDB = db.DBConnection()
seasonResults = myDB.select(
"SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC",
[showObj.indexerid]
)
sqlResults = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[showObj.indexerid]
)
t = PageTemplate(rh=self, file="displayShow.tmpl")
t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.indexerid}]
try:
t.showLoc = (showObj.location, True)
except sickbeard.exceptions.ShowDirNotFoundException:
t.showLoc = (showObj._location, False)
show_message = ''
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj):
show_message = 'This show is in the process of being downloaded - the info below is incomplete.'
elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj):
show_message = 'The information on this page is in the process of being updated.'
elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj):
show_message = 'The episodes below are currently being refreshed from disk'
elif sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj):
show_message = 'Currently downloading subtitles for this show'
elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj):
show_message = 'This show is queued to be refreshed.'
elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj):
show_message = 'This show is queued and awaiting an update.'
elif sickbeard.showQueueScheduler.action.isInSubtitleQueue(showObj):
show_message = 'This show is queued and awaiting subtitles download.'
if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj):
if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj):
t.submenu.append(
{'title': 'Remove', 'path': 'home/deleteShow?show=%d' % showObj.indexerid, 'confirm': True})
t.submenu.append({'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d' % showObj.indexerid})
t.submenu.append(
{'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&force=1' % showObj.indexerid})
t.submenu.append({'title': 'Update show in KODI',
'path': 'home/updateKODI?showName=%s' % urllib.quote_plus(
showObj.name.encode('utf-8')), 'requires': self.haveKODI})
t.submenu.append({'title': 'Preview Rename', 'path': 'home/testRename?show=%d' % showObj.indexerid})
if sickbeard.USE_SUBTITLES and not sickbeard.showQueueScheduler.action.isBeingSubtitled(
showObj) and showObj.subtitles:
t.submenu.append(
{'title': 'Download Subtitles', 'path': 'home/subtitleShow?show=%d' % showObj.indexerid})
t.show = showObj
t.sqlResults = sqlResults
t.seasonResults = seasonResults
t.show_message = show_message
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
for curResult in sqlResults:
curEpCat = showObj.getOverview(int(curResult["status"] or -1))
if curEpCat:
epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
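# Sort-key helper: returns the name unchanged when SORT_ARTICLE is enabled (or the name is empty), otherwise strips the leading article for sorting.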
def titler(x):
return (helpers.remove_article(x), x)[not x or sickbeard.SORT_ARTICLE]
if sickbeard.ANIME_SPLIT_HOME:
shows = []
anime = []
for show in sickbeard.showList:
if show.is_anime:
anime.append(show)
else:
shows.append(show)
t.sortedShowLists = [["Shows", sorted(shows, lambda x, y: cmp(titler(x.name), titler(y.name)))],
["Anime", sorted(anime, lambda x, y: cmp(titler(x.name), titler(y.name)))]]
else:
t.sortedShowLists = [
["Shows", sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))]]
t.bwl = None
if showObj.is_anime:
t.bwl = BlackAndWhiteList(showObj.indexerid)
t.epCounts = epCounts
t.epCats = epCats
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.indexerid)
indexerid = int(showObj.indexerid)
indexer = int(showObj.indexer)
t.all_scene_exceptions = showObj.exceptions
t.scene_numbering = get_scene_numbering_for_show(indexerid, indexer)
t.xem_numbering = get_xem_numbering_for_show(indexerid, indexer)
t.scene_absolute_numbering = get_scene_absolute_numbering_for_show(indexerid, indexer)
t.xem_absolute_numbering = get_xem_absolute_numbering_for_show(indexerid, indexer)
return t.respond()
def plotDetails(self, show, season, episode):
myDB = db.DBConnection()
result = myDB.selectOne(
"SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
(int(show), int(season), int(episode)))
return result['description'] if result else 'Episode not found.'
def sceneExceptions(self, show):
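# List all scene exceptions for the show grouped by season ("*" means any season), one line of names per season.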
exceptionsList = sickbeard.scene_exceptions.get_all_scene_exceptions(show)
if not exceptionsList:
return "No scene exceptions"
out = []
for season, names in iter(sorted(exceptionsList.iteritems())):
if season == -1:
season = "*"
out.append("S" + str(season) + ": " + ", ".join(names))
return "<br/>".join(out)
def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[],
flatten_folders=None, paused=None, directCall=False, air_by_date=None, sports=None, dvdorder=None,
indexerLang=None, subtitles=None, archive_firstmatch=None, rls_ignore_words=None,
rls_require_words=None, anime=None, blackWords=None, whiteWords=None, blacklist=None, whitelist=None,
scene=None, defaultEpStatus=None):
anidb_failed = False
if show is None:
errString = "Invalid show ID: " + str(show)
if directCall:
return [errString]
else:
return self._genericMessage("Error", errString)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if not showObj:
errString = "Unable to find the specified show: " + str(show)
if directCall:
return [errString]
else:
return self._genericMessage("Error", errString)
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.indexerid)
if not location and not anyQualities and not bestQualities and not flatten_folders:
t = PageTemplate(rh=self, file="editShow.tmpl")
t.submenu = self.HomeMenu()
if showObj.is_anime:
bwl = BlackAndWhiteList(showObj.indexerid)
t.whiteWords = ""
if "global" in bwl.whiteDict:
t.whiteWords = ", ".join(bwl.whiteDict["global"])
t.blackWords = ""
if "global" in bwl.blackDict:
t.blackWords = ", ".join(bwl.blackDict["global"])
t.whitelist = []
if "release_group" in bwl.whiteDict:
t.whitelist = bwl.whiteDict["release_group"]
t.blacklist = []
if "release_group" in bwl.blackDict:
t.blacklist = bwl.blackDict["release_group"]
t.groups = []
if helpers.set_up_anidb_connection() and not anidb_failed:
try:
anime = adba.Anime(sickbeard.ADBA_CONNECTION, name=showObj.name)
t.groups = anime.get_groups()
except Exception as e:
anidb_failed = True
ui.notifications.error('Unable to retrieve Fansub Groups from AniDB.')
with showObj.lock:
t.show = showObj
t.scene_exceptions = get_scene_exceptions(showObj.indexerid)
return t.respond()
flatten_folders = config.checkbox_to_value(flatten_folders)
dvdorder = config.checkbox_to_value(dvdorder)
archive_firstmatch = config.checkbox_to_value(archive_firstmatch)
paused = config.checkbox_to_value(paused)
air_by_date = config.checkbox_to_value(air_by_date)
scene = config.checkbox_to_value(scene)
sports = config.checkbox_to_value(sports)
anime = config.checkbox_to_value(anime)
subtitles = config.checkbox_to_value(subtitles)
if indexerLang and indexerLang in sickbeard.indexerApi(showObj.indexer).indexer().config['valid_languages']:
indexer_lang = indexerLang
else:
indexer_lang = showObj.lang
# if we changed the language then kick off an update
do_update = indexer_lang != showObj.lang
do_update_scene_numbering = scene != showObj.scene or anime != showObj.anime
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
if type(exceptions_list) != list:
exceptions_list = [exceptions_list]
# If directCall from mass_edit_update no scene exceptions handling or blackandwhite list handling
if directCall:
do_update_exceptions = False
else:
if set(exceptions_list) == set(showObj.exceptions):
do_update_exceptions = False
else:
do_update_exceptions = True
if showObj.is_anime:
bwl = BlackAndWhiteList(showObj.indexerid)
if whitelist:
whitelist = whitelist.split(",")
shortWhiteList = []
if helpers.set_up_anidb_connection() and not anidb_failed:
try:
for groupName in whitelist:
group = sickbeard.ADBA_CONNECTION.group(gname=groupName)
for line in group.datalines:
if line["shortname"]:
shortWhiteList.append(line["shortname"])
else:
if groupName not in shortWhiteList:
shortWhiteList.append(groupName)
except Exception as e:
anidb_failed = True
ui.notifications.error('Unable to retrieve data from AniDB.')
shortWhiteList = whitelist
else:
shortWhiteList = whitelist
bwl.set_white_keywords_for("release_group", shortWhiteList)
else:
bwl.set_white_keywords_for("release_group", [])
if blacklist:
blacklist = blacklist.split(",")
shortBlacklist = []
if helpers.set_up_anidb_connection() and not anidb_failed:
try:
for groupName in blacklist:
group = sickbeard.ADBA_CONNECTION.group(gname=groupName)
for line in group.datalines:
if line["shortname"]:
shortBlacklist.append(line["shortname"])
else:
if groupName not in shortBlacklist:
shortBlacklist.append(groupName)
except Exception as e:
anidb_failed = True
ui.notifications.error('Unable to retrieve data from AniDB.')
shortBlacklist = blacklist
else:
shortBlacklist = blacklist
bwl.set_black_keywords_for("release_group", shortBlacklist)
else:
bwl.set_black_keywords_for("release_group", [])
if whiteWords:
whiteWords = [x.strip() for x in whiteWords.split(",")]
bwl.set_white_keywords_for("global", whiteWords)
else:
bwl.set_white_keywords_for("global", [])
if blackWords:
blackWords = [x.strip() for x in blackWords.split(",")]
bwl.set_black_keywords_for("global", blackWords)
else:
bwl.set_black_keywords_for("global", [])
errors = []
with showObj.lock:
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
showObj.quality = newQuality
showObj.archive_firstmatch = archive_firstmatch
# reversed for now
if bool(showObj.flatten_folders) != bool(flatten_folders):
showObj.flatten_folders = flatten_folders
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj)
except exceptions.CantRefreshException as e:
errors.append("Unable to refresh this show: " + ex(e))
showObj.paused = paused
showObj.scene = scene
showObj.anime = anime
showObj.sports = sports
showObj.subtitles = subtitles
showObj.air_by_date = air_by_date
showObj.default_ep_status = int(defaultEpStatus)
if not directCall:
showObj.lang = indexer_lang
showObj.dvdorder = dvdorder
showObj.rls_ignore_words = rls_ignore_words.strip()
showObj.rls_require_words = rls_require_words.strip()
# if we change location clear the db of episodes, change it, write to db, and rescan
if os.path.normpath(showObj._location) != os.path.normpath(location):
logger.log(os.path.normpath(showObj._location) + " != " + os.path.normpath(location), logger.DEBUG)
if not ek.ek(os.path.isdir, location) and not sickbeard.CREATE_MISSING_SHOW_DIRS:
errors.append("New location <tt>%s</tt> does not exist" % location)
# don't bother if we're going to update anyway
elif not do_update:
# change it
try:
showObj.location = location
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj)
except exceptions.CantRefreshException as e:
errors.append("Unable to refresh this show: " + ex(e))
# grab updated info from TVDB
# showObj.loadEpisodesFromIndexer()
# rescan the episodes in the new folder
except exceptions.NoNFOException:
errors.append(
"The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in SickRage." % location)
# save it to the DB
showObj.saveToDB()
# force the update
if do_update:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True)
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
except exceptions.CantUpdateException as e:
errors.append("Unable to force an update on the show.")
if do_update_exceptions:
try:
scene_exceptions.update_scene_exceptions(showObj.indexerid, exceptions_list)  # @UndefinedVariable
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
except exceptions.CantUpdateException as e:
errors.append("Unable to force an update on scene exceptions of the show.")
if do_update_scene_numbering:
try:
sickbeard.scene_numbering.xem_refresh(showObj.indexerid, showObj.indexer)
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
except exceptions.CantUpdateException as e:
errors.append("Unable to force an update on scene numbering of the show.")
if directCall:
return errors
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
'<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>")
return self.redirect("/home/displayShow?show=" + show)
def deleteShow(self, show=None, full=0):
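# Delete a show; full=1 also removes the related media (trashed or deleted depending on TRASH_REMOVE_SHOW). If Trakt sync is enabled the show is removed from the Trakt library first.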
if show is None:
return self._genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return self._genericMessage("Error", "Unable to find the specified show")
if sickbeard.showQueueScheduler.action.isBeingAdded(
showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj):
return self._genericMessage("Error", "Shows can't be deleted while they're being added or updated.")
if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC:
# remove show from trakt.tv library
try:
sickbeard.traktCheckerScheduler.action.removeShowFromTraktLibrary(showObj)
except traktException as e:
logger.log("Trakt: Unable to delete show: {0}. Error: {1}".format(showObj.name, ex(e)),logger.ERROR)
return self._genericMessage("Error", "Unable to delete show: {0}".format(showObj.name))
showObj.deleteShow(bool(full))
ui.notifications.message('<b>%s</b> has been %s %s' %
(showObj.name,
('deleted', 'trashed')[sickbeard.TRASH_REMOVE_SHOW],
('(media untouched)', '(with all related media)')[bool(full)]))
return self.redirect("/home/")
def refreshShow(self, show=None):
if show is None:
return self._genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return self._genericMessage("Error", "Unable to find the specified show")
# force the update from the DB
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj)
except exceptions.CantRefreshException as e:
ui.notifications.error("Unable to refresh this show.",
ex(e))
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
return self.redirect("/home/displayShow?show=" + str(showObj.indexerid))
def updateShow(self, show=None, force=0):
if show is None:
return self._genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return self._genericMessage("Error", "Unable to find the specified show")
# force the update
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force))
except exceptions.CantUpdateException as e:
ui.notifications.error("Unable to update this show.", ex(e))
# just give it some time
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
return self.redirect("/home/displayShow?show=" + str(showObj.indexerid))
def subtitleShow(self, show=None, force=0):
if show is None:
return self._genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return self._genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj, bool(force))
time.sleep(cpu_presets[sickbeard.CPU_PRESET])
return self.redirect("/home/displayShow?show=" + str(showObj.indexerid))
def updateKODI(self, showName=None):
# only send update to first host in the list -- workaround for kodi sql backend users
if sickbeard.KODI_UPDATE_ONLYFIRST:
# only send update to first host in the list -- workaround for kodi sql backend users
host = sickbeard.KODI_HOST.split(",")[0].strip()
else:
host = sickbeard.KODI_HOST
if notifiers.kodi_notifier.update_library(showName=showName):
ui.notifications.message("Library update command sent to KODI host(s): " + host)
else:
ui.notifications.error("Unable to contact one or more KODI host(s): " + host)
return self.redirect('/home/')
def updatePLEX(self):
if notifiers.plex_notifier.update_library():
ui.notifications.message(
"Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
else:
ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
return self.redirect('/home/')
def setStatus(self, show=None, eps=None, status=None, direct=False):
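# eps is a pipe-separated list of "<season>x<episode>" strings; status is the numeric status code to apply to each of them.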
if show is None or eps is None or status is None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return self._genericMessage("Error", errMsg)
if not statusStrings.has_key(int(status)):
errMsg = "Invalid status"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return self._genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
errMsg = "Error", "Show not in show list"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return self._genericMessage("Error", errMsg)
segments = {}
trakt_data = []
if eps is not None:
sql_l = []
for curEp in eps.split('|'):
logger.log(u"Attempting to set status on episode " + curEp + " to " + status, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
if epObj is None:
return self._genericMessage("Error", "Episode couldn't be retrieved")
if int(status) in [WANTED, FAILED]:
# figure out what episodes are wanted so we can backlog them
if epObj.season in segments:
segments[epObj.season].append(epObj)
else:
segments[epObj.season] = [epObj]
with epObj.lock:
# don't let them mess up UNAIRED episodes
if epObj.status == UNAIRED:
logger.log(u"Refusing to change status of " + curEp + " because it is UNAIRED", logger.ERROR)
continue
if int(
status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.DOWNLOADED + [
IGNORED] and not ek.ek(os.path.isfile, epObj.location):
logger.log(
u"Refusing to change status of " + curEp + " to DOWNLOADED because it's not SNATCHED/DOWNLOADED",
logger.ERROR)
continue
if int(
status) == FAILED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.DOWNLOADED:
logger.log(
u"Refusing to change status of " + curEp + " to FAILED because it's not SNATCHED/DOWNLOADED",
logger.ERROR)
continue
if epObj.status in Quality.DOWNLOADED and int(status) == WANTED:
logger.log(u"Removing release_name for episode as you want to set a downloaded episode back to wanted, so obviously you want it replaced")
epObj.release_name = ""
epObj.status = int(status)
# mass add to database
sql_l.append(epObj.get_sql())
trakt_data.append((epObj.season, epObj.episode))
data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data)
if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
if int(status) in [WANTED, FAILED]:
logger.log(u"Add episodes, showid: indexerid " + str(showObj.indexerid) + ", Title " + str(showObj.name) + " to Watchlist", logger.DEBUG)
upd = "add"
elif int(status) in [ARCHIVED, IGNORED, SKIPPED] + Quality.DOWNLOADED:
logger.log(u"Remove episodes, showid: indexerid " + str(showObj.indexerid) + ", Title " + str(showObj.name) + " from Watchlist", logger.DEBUG)
upd = "remove"
if data:
notifiers.trakt_notifier.update_watchlist(showObj, data_episode=data, update=upd)
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
if int(status) == WANTED and not showObj.paused:
msg = "Backlog was automatically started for the following seasons of <b>" + showObj.name + "</b>:<br />"
msg += '<ul>'
for season, segment in segments.items():
cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, segment)
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item)
msg += "<li>Season " + str(season) + "</li>"
logger.log(u"Sending backlog for " + showObj.name + " season " + str(
season) + " because some eps were set to wanted")
msg += "</ul>"
if segments:
ui.notifications.message("Backlog started", msg)
elif int(status) == WANTED and showObj.paused:
logger.log(u"Some episodes were set to wanted, but " + showObj.name + " is paused. Not adding to Backlog until show is unpaused")
if int(status) == FAILED:
msg = "Retrying Search was automatically started for the following season of <b>" + showObj.name + "</b>:<br />"
msg += '<ul>'
for season, segment in segments.items():
cur_failed_queue_item = search_queue.FailedQueueItem(showObj, segment)
sickbeard.searchQueueScheduler.action.add_item(cur_failed_queue_item)
msg += "<li>Season " + str(season) + "</li>"
logger.log(u"Retrying Search for " + showObj.name + " season " + str(
season) + " because some eps were set to failed")
msg += "</ul>"
if segments:
ui.notifications.message("Retry Search started", msg)
if direct:
return json.dumps({'result': 'success'})
else:
return self.redirect("/home/displayShow?show=" + show)
def testRename(self, show=None):
if show is None:
return self._genericMessage("Error", "You must specify a show")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return self._genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location # @UnusedVariable
except exceptions.ShowDirNotFoundException:
return self._genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
ep_obj_list = showObj.getAllEpisodes(has_location=True)
for cur_ep_obj in ep_obj_list:
# Only want to rename if we have a location
if cur_ep_obj.location:
if cur_ep_obj.relatedEps:
# do we have one of multi-episodes in the rename list already
have_already = False
for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]:
if cur_related_ep in ep_obj_rename_list:
have_already = True
break
if not have_already:
ep_obj_rename_list.append(cur_ep_obj)
else:
ep_obj_rename_list.append(cur_ep_obj)
if ep_obj_rename_list:
# present season DESC episode DESC on screen
ep_obj_rename_list.reverse()
t = PageTemplate(rh=self, file="testRename.tmpl")
t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.indexerid}]
t.ep_obj_list = ep_obj_rename_list
t.show = showObj
return t.respond()
def doRename(self, show=None, eps=None):
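# Rename the selected episodes; eps is a pipe-separated list of "<season>x<episode>" strings.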
if show is None or eps is None:
errMsg = "You must specify a show and at least one episode"
return self._genericMessage("Error", errMsg)
show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if show_obj is None:
errMsg = "Error", "Show not in show list"
return self._genericMessage("Error", errMsg)
try:
show_loc = show_obj.location # @UnusedVariable
except exceptions.ShowDirNotFoundException:
return self._genericMessage("Error", "Can't rename episodes when the show dir is missing.")
if eps is None:
return self.redirect("/home/displayShow?show=" + show)
myDB = db.DBConnection()
for curEp in eps.split('|'):
epInfo = curEp.split('x')
# this is probably the worst possible way to deal with double eps but I've kinda painted myself into a corner here with this stupid database
ep_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND 5=5",
[show, epInfo[0], epInfo[1]])
if not ep_result:
logger.log(u"Unable to find an episode for " + curEp + ", skipping", logger.WARNING)
continue
related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?",
[ep_result[0]["location"], epInfo[1]])
root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1]))
root_ep_obj.relatedEps = []
for cur_related_ep in related_eps_result:
related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep_obj not in root_ep_obj.relatedEps:
root_ep_obj.relatedEps.append(related_ep_obj)
root_ep_obj.rename()
return self.redirect("/home/displayShow?show=" + show)
def searchEpisode(self, show=None, season=None, episode=None, downCurQuality=0):
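# Queue a manual search for one episode and report via JSON whether the item was queued or picked up successfully.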
# retrieve the episode object and fail if we can't get one
ep_obj = self._getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj.show, ep_obj, bool(int(downCurQuality)))
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item)
if not ep_queue_item.started and ep_queue_item.success is None:
return json.dumps(
{'result': 'success'}) # I actually want to call it queued, because the search hasn't been started yet!
if ep_queue_item.started and ep_queue_item.success is None:
return json.dumps({'result': 'success'})
else:
return json.dumps({'result': 'failure'})
### Returns the current ep_queue_item status for the currently viewed show.
# Possible status: Downloaded, Snatched, etc...
# Returns {'show': 279530, 'episodes': [{'episode': 6, 'season': 1, 'searchstatus': 'queued', 'status': 'running', 'quality': '4013'}]}
def getManualSearchStatus(self, show=None):
def getEpisodes(searchThread, searchstatus):
results = []
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(searchThread.show.indexerid))
if isinstance(searchThread, sickbeard.search_queue.ManualSearchQueueItem):
results.append({'show': searchThread.show.indexerid,
'episode': searchThread.segment.episode,
'episodeindexid': searchThread.segment.indexerid,
'season': searchThread.segment.season,
'searchstatus': searchstatus,
'status': statusStrings[searchThread.segment.status],
'quality': self.getQualityClass(searchThread.segment),
'overview': Overview.overviewStrings[showObj.getOverview(int(searchThread.segment.status or -1))]})
else:
for epObj in searchThread.segment:
results.append({'show': epObj.show.indexerid,
'episode': epObj.episode,
'episodeindexid': epObj.indexerid,
'season': epObj.season,
'searchstatus': searchstatus,
'status': statusStrings[epObj.status],
'quality': self.getQualityClass(epObj),
'overview': Overview.overviewStrings[showObj.getOverview(int(epObj.status or -1))]})
return results
episodes = []
# Queued Searches
searchstatus = 'queued'
for searchThread in sickbeard.searchQueueScheduler.action.get_all_ep_from_queue(show):
episodes += getEpisodes(searchThread, searchstatus)
# Running Searches
searchstatus = 'searching'
if (sickbeard.searchQueueScheduler.action.is_manualsearch_in_progress()):
searchThread = sickbeard.searchQueueScheduler.action.currentItem
if searchThread.success:
searchstatus = 'finished'
episodes += getEpisodes(searchThread, searchstatus)
# Finished Searches
searchstatus = 'finished'
for searchThread in sickbeard.search_queue.MANUAL_SEARCH_HISTORY:
if show is not None:
if not str(searchThread.show.indexerid) == show:
continue
if isinstance(searchThread, sickbeard.search_queue.ManualSearchQueueItem):
if not [x for x in episodes if x['episodeindexid'] == searchThread.segment.indexerid]:
episodes += getEpisodes(searchThread, searchstatus)
else:
### These are only Failed Downloads/Retry SearchThreadItems... let's loop through the segment/episodes
if not [i for i, j in zip(searchThread.segment, episodes) if i.indexerid == j['episodeindexid']]:
episodes += getEpisodes(searchThread, searchstatus)
return json.dumps({'episodes': episodes})
def getQualityClass(self, ep_obj):
# return the correct json value
# Find the quality class for the episode
quality_class = Quality.qualityStrings[Quality.UNKNOWN]
ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status)
for x in (SD, HD720p, HD1080p):
if ep_quality in Quality.splitQuality(x)[0]:
quality_class = qualityPresetStrings[x]
break
return quality_class
def searchEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = self._getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try do download subtitles for that episode
previous_subtitles = set(subliminal.language.Language(x) for x in ep_obj.subtitles)
try:
ep_obj.subtitles = set(x.language for x in ep_obj.downloadSubtitles().values()[0])
except:
return json.dumps({'result': 'failure'})
# return the correct json value
if previous_subtitles != ep_obj.subtitles:
status = 'New subtitles downloaded: %s' % ' '.join([
"<img src='" + sickbeard.WEB_ROOT + "/images/flags/" + x.alpha2 +
".png' alt='" + x.name + "'/>" for x in
sorted(list(ep_obj.subtitles.difference(previous_subtitles)))])
else:
status = 'No subtitles downloaded'
ui.notifications.message('Subtitles Search', status)
return json.dumps({'result': status, 'subtitles': ','.join(sorted([x.alpha2 for x in
ep_obj.subtitles.union(
previous_subtitles)]))})
def setSceneNumbering(self, show, indexer, forSeason=None, forEpisode=None, forAbsolute=None, sceneSeason=None,
sceneEpisode=None, sceneAbsolute=None):
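# Store a user-supplied scene numbering mapping (season/episode, or absolute numbers for anime shows) and echo the stored scene values back as JSON.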
# sanitize:
if forSeason in ['null', '']: forSeason = None
if forEpisode in ['null', '']: forEpisode = None
if forAbsolute in ['null', '']: forAbsolute = None
if sceneSeason in ['null', '']: sceneSeason = None
if sceneEpisode in ['null', '']: sceneEpisode = None
if sceneAbsolute in ['null', '']: sceneAbsolute = None
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj.is_anime:
result = {
'success': True,
'forAbsolute': forAbsolute,
}
else:
result = {
'success': True,
'forSeason': forSeason,
'forEpisode': forEpisode,
}
# retrieve the episode object and fail if we can't get one
if showObj.is_anime:
ep_obj = self._getEpisode(show, absolute=forAbsolute)
else:
ep_obj = self._getEpisode(show, forSeason, forEpisode)
if isinstance(ep_obj, str):
result['success'] = False
result['errorMessage'] = ep_obj
elif showObj.is_anime:
logger.log(u"setAbsoluteSceneNumbering for %s from %s to %s" %
(show, forAbsolute, sceneAbsolute), logger.DEBUG)
show = int(show)
indexer = int(indexer)
forAbsolute = int(forAbsolute)
if sceneAbsolute is not None: sceneAbsolute = int(sceneAbsolute)
set_scene_numbering(show, indexer, absolute_number=forAbsolute, sceneAbsolute=sceneAbsolute)
else:
logger.log(u"setEpisodeSceneNumbering for %s from %sx%s to %sx%s" %
(show, forSeason, forEpisode, sceneSeason, sceneEpisode), logger.DEBUG)
show = int(show)
indexer = int(indexer)
forSeason = int(forSeason)
forEpisode = int(forEpisode)
if sceneSeason is not None: sceneSeason = int(sceneSeason)
if sceneEpisode is not None: sceneEpisode = int(sceneEpisode)
set_scene_numbering(show, indexer, season=forSeason, episode=forEpisode, sceneSeason=sceneSeason,
sceneEpisode=sceneEpisode)
if showObj.is_anime:
sn = get_scene_absolute_numbering(show, indexer, forAbsolute)
if sn:
result['sceneAbsolute'] = sn
else:
result['sceneAbsolute'] = None
else:
sn = get_scene_numbering(show, indexer, forSeason, forEpisode)
if sn:
(result['sceneSeason'], result['sceneEpisode']) = sn
else:
(result['sceneSeason'], result['sceneEpisode']) = (None, None)
return json.dumps(result)
def retryEpisode(self, show, season, episode, downCurQuality):
# retrieve the episode object and fail if we can't get one
ep_obj = self._getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.FailedQueueItem(ep_obj.show, [ep_obj], bool(int(downCurQuality)))
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item)
if not ep_queue_item.started and ep_queue_item.success is None:
return json.dumps(
{'result': 'success'}) # I actually want to call it queued, because the search hasn't been started yet!
if ep_queue_item.started and ep_queue_item.success is None:
return json.dumps({'result': 'success'})
else:
return json.dumps({'result': 'failure'})
@route('/home/postprocess(/?.*)')
class HomePostProcess(Home):
def __init__(self, *args, **kwargs):
super(HomePostProcess, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="home_postprocess.tmpl")
t.submenu = self.HomeMenu()
return t.respond()
def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None, process_method=None, force=None,
is_priority=None, delete_on="0", failed="0", type="auto", *args, **kwargs):
if failed == "0":
failed = False
else:
failed = True
if force in ["on", "1"]:
force = True
else:
force = False
if is_priority in ["on", "1"]:
is_priority = True
else:
is_priority = False
if delete_on in ["on", "1"]:
delete_on = True
else:
delete_on = False
if not dir:
return self.redirect("/home/postprocess/")
else:
result = processTV.processDir(dir, nzbName, process_method=process_method, force=force,
is_priority=is_priority, delete_on=delete_on, failed=failed, type=type)
if quiet is not None and int(quiet) == 1:
return result
result = result.replace("\n", "<br />\n")
return self._genericMessage("Postprocessing results", result)
@route('/home/addShows(/?.*)')
class HomeAddShows(Home):
def __init__(self, *args, **kwargs):
super(HomeAddShows, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="home_addShows.tmpl")
t.submenu = self.HomeMenu()
return t.respond()
def getIndexerLanguages(self):
result = sickbeard.indexerApi().config['valid_languages']
# Make sure list is sorted alphabetically but 'en' is in front
if 'en' in result:
del result[result.index('en')]
result.sort()
result.insert(0, 'en')
return json.dumps({'results': result})
def sanitizeFileName(self, name):
return helpers.sanitizeFileName(name)
def searchIndexersForShowName(self, search_term, lang="en", indexer=None):
if not lang or lang == 'null':
lang = "en"
search_term = search_term.encode('utf-8')
results = {}
final_results = []
# Query Indexers for each search term and build the list of results
for indexer in sickbeard.indexerApi().indexers if not int(indexer) else [int(indexer)]:
lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
lINDEXER_API_PARMS['language'] = lang
lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsListUI
t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
logger.log("Searching for Show with searchterm: %s on Indexer: %s" % (
search_term, sickbeard.indexerApi(indexer).name), logger.DEBUG)
try:
# add search results
results.setdefault(indexer, []).extend(t[search_term])
except Exception as e:
continue
map(final_results.extend,
([[sickbeard.indexerApi(id).name, id, sickbeard.indexerApi(id).config["show_url"], int(show['id']),
show['seriesname'], show['firstaired']] for show in shows] for id, shows in results.items()))
lang_id = sickbeard.indexerApi().config['langabbv_to_id'][lang]
return json.dumps({'results': final_results, 'langid': lang_id})
def massAddTable(self, rootDir=None):
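# Build the mass-add table: scan each selected root dir for show folders, flag the ones already in the DB and pick up any existing metadata info.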
t = PageTemplate(rh=self, file="home_massAddTable.tmpl")
t.submenu = self.HomeMenu()
if not rootDir:
return "No folders selected."
elif type(rootDir) != list:
root_dirs = [rootDir]
else:
root_dirs = rootDir
root_dirs = [urllib.unquote_plus(x) for x in root_dirs]
if sickbeard.ROOT_DIRS:
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
else:
default_index = 0
if len(root_dirs) > default_index:
tmp = root_dirs[default_index]
if tmp in root_dirs:
root_dirs.remove(tmp)
root_dirs = [tmp] + root_dirs
dir_list = []
myDB = db.DBConnection()
for root_dir in root_dirs:
try:
file_list = ek.ek(os.listdir, root_dir)
except:
continue
for cur_file in file_list:
try:
cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file))
if not ek.ek(os.path.isdir, cur_path):
continue
except:
continue
cur_dir = {
'dir': cur_path,
'display_dir': '<b>' + ek.ek(os.path.dirname, cur_path) + os.sep + '</b>' + ek.ek(
os.path.basename,
cur_path),
}
# see if the folder is in KODI already
dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path])
if dirResults:
cur_dir['added_already'] = True
else:
cur_dir['added_already'] = False
dir_list.append(cur_dir)
indexer_id = show_name = indexer = None
for cur_provider in sickbeard.metadata_provider_dict.values():
if not (indexer_id and show_name):
(indexer_id, show_name, indexer) = cur_provider.retrieveShowMetadata(cur_path)
# default to TVDB if indexer was not detected
if show_name and not (indexer or indexer_id):
(sn, idx, id) = helpers.searchIndexerForShowID(show_name, indexer, indexer_id)
# set indexer and indexer_id from found info
if not indexer and idx:
indexer = idx
if not indexer_id and id:
indexer_id = id
cur_dir['existing_info'] = (indexer_id, show_name, indexer)
if indexer_id and helpers.findCertainShow(sickbeard.showList, indexer_id):
cur_dir['added_already'] = True
t.dirList = dir_list
return t.respond()
def newShow(self, show_to_add=None, other_shows=None):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(rh=self, file="home_newShow.tmpl")
t.submenu = self.HomeMenu()
indexer, show_dir, indexer_id, show_name = self.split_extra_show(show_to_add)
if indexer_id and indexer and show_name:
use_provided_info = True
else:
use_provided_info = False
# tell the template whether we're giving it show name & Indexer ID
t.use_provided_info = use_provided_info
# use the given show_dir for the indexer search if available
if not show_dir:
t.default_show_name = ''
elif not show_name:
t.default_show_name = re.sub(' \(\d{4}\)', '',
ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.', ' '))
else:
t.default_show_name = show_name
# carry a list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
if use_provided_info:
t.provided_indexer_id = int(indexer_id or 0)
t.provided_indexer_name = show_name
t.provided_show_dir = show_dir
t.other_shows = other_shows
t.provided_indexer = int(indexer or sickbeard.INDEXER_DEFAULT)
t.indexers = sickbeard.indexerApi().indexers
return t.respond()
def recommendedShows(self):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(rh=self, file="home_recommendedShows.tmpl")
t.submenu = self.HomeMenu()
return t.respond()
def getRecommendedShows(self):
final_results = []
logger.log(u"Getting recommended shows from Trakt.tv", logger.DEBUG)
trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
try:
recommendedlist = trakt_api.traktRequest("recommendations/shows?extended=full,images")
if recommendedlist:
indexers = ['tvdb', 'tvrage']
map(final_results.append, (
[int(show['ids'][indexers[sickbeard.TRAKT_DEFAULT_INDEXER - 1]]),
'http://www.trakt.tv/shows/%s' % show['ids']['slug'], show['title'],
show['overview'],
None if show['first_aired'] is None else dateutil_parser.parse(show['first_aired']).strftime(sickbeard.DATE_PRESET)]
for show in recommendedlist if not helpers.findCertainShow(sickbeard.showList, [
int(show['ids'][indexers[sickbeard.TRAKT_DEFAULT_INDEXER - 1]])])))
except (traktException, traktAuthException, traktServerBusy) as e:
logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
return json.dumps({'results': final_results})
def addRecommendedShow(self, whichSeries=None, indexerLang="en", rootDir=None, defaultStatus=None,
anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None,
fullShowPath=None, other_shows=None, skipShow=None, providedIndexer=None, anime=None,
scene=None):
indexer = 1
indexer_name = sickbeard.indexerApi(int(indexer)).name
show_url = whichSeries.split('|')[1]
indexer_id = whichSeries.split('|')[0]
show_name = whichSeries.split('|')[2]
return self.addNewShow('|'.join([indexer_name, str(indexer), show_url, indexer_id, show_name, ""]),
indexerLang, rootDir,
defaultStatus,
anyQualities, bestQualities, flatten_folders, subtitles, fullShowPath, other_shows,
skipShow, providedIndexer, anime, scene)
def trendingShows(self):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(rh=self, file="home_trendingShows.tmpl")
t.submenu = self.HomeMenu()
return t.respond()
def getTrendingShows(self):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(rh=self, file="trendingShows.tmpl")
t.submenu = self.HomeMenu()
t.trending_shows = []
trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
try:
if sickbeard.TRAKT_BLACKLIST_NAME is not None:
not_liked_show = trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items") or []
limit_show = 50 + len(not_liked_show)
shows = trakt_api.traktRequest("shows/trending?limit=" + str(limit_show) + "&extended=full,images") or []
library_shows = trakt_api.traktRequest("sync/collection/shows?extended=full") or []
for show in shows:
try:
tvdb_id = int(show['show']['ids']['tvdb'])
tvrage_id = int(show['show']['ids']['tvrage'] or 0)
if not helpers.findCertainShow(sickbeard.showList,
[tvdb_id, tvrage_id]):
if show['show']['ids']['tvdb'] not in (lshow['show']['ids']['tvdb'] for lshow in library_shows):
if not_liked_show:
if show['show']['ids']['tvdb'] not in (show['show']['ids']['tvdb'] for show in not_liked_show if show['type'] == 'show'):
t.trending_shows += [show]
except exceptions.MultipleShowObjectsException:
continue
if sickbeard.TRAKT_BLACKLIST_NAME != '':
t.blacklist = True
else:
t.blacklist = False
except (traktException, traktAuthException, traktServerBusy) as e:
logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING)
return t.respond()
def addShowToBlacklist(self, indexer_id):
# URL parameters
data = {
'shows': [
{
'ids': {
'tvdb': indexer_id
}
}
]
}
trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD)
result = trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items", data, method='POST')
return self.redirect('/home/addShows/trendingShows/')
def existingShows(self):
"""
Prints out the page to add existing shows from a root dir
"""
t = PageTemplate(rh=self, file="home_addExistingShow.tmpl")
t.submenu = self.HomeMenu()
return t.respond()
def addTraktShow(self, indexer_id, showName):
if helpers.findCertainShow(sickbeard.showList, int(indexer_id)):
return
if sickbeard.ROOT_DIRS:
root_dirs = sickbeard.ROOT_DIRS.split('|')
location = root_dirs[int(root_dirs[0]) + 1]
else:
location = None
if location:
show_dir = ek.ek(os.path.join, location, helpers.sanitizeFileName(showName))
dir_exists = helpers.makeDir(show_dir)
if not dir_exists:
logger.log(u"Unable to create the folder " + show_dir + ", can't add the show", logger.ERROR)
return
else:
helpers.chmodAsParent(show_dir)
sickbeard.showQueueScheduler.action.addShow(1, int(indexer_id), show_dir,
default_status=sickbeard.STATUS_DEFAULT,
quality=sickbeard.QUALITY_DEFAULT,
flatten_folders=sickbeard.FLATTEN_FOLDERS_DEFAULT,
subtitles=sickbeard.SUBTITLES_DEFAULT,
anime=sickbeard.ANIME_DEFAULT,
scene=sickbeard.SCENE_DEFAULT)
ui.notifications.message('Show added', 'Adding the specified show into ' + show_dir)
else:
logger.log(u"There was an error creating the show, no root directory setting found", logger.ERROR)
return "No root directories setup, please go back and add one."
# done adding show
return self.redirect('/home/')
def addNewShow(self, whichSeries=None, indexerLang="en", rootDir=None, defaultStatus=None,
anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None,
fullShowPath=None, other_shows=None, skipShow=None, providedIndexer=None, anime=None,
scene=None):
"""
Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are
provided then it forwards back to newShow, if not it goes to /home.
"""
# grab our list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
def finishAddShow():
# if there are no extra shows then go home
if not other_shows:
return self.redirect('/home/')
# peel off the next one
next_show_dir = other_shows[0]
rest_of_show_dirs = other_shows[1:]
# go to add the next show
return self.newShow(next_show_dir, rest_of_show_dirs)
# if we're skipping then behave accordingly
if skipShow:
return finishAddShow()
# sanity check on our inputs
if (not rootDir and not fullShowPath) or not whichSeries:
return "Missing params, no Indexer ID or folder:" + repr(whichSeries) + " and " + repr(
rootDir) + "/" + repr(fullShowPath)
# figure out what show we're adding and where
series_pieces = whichSeries.split('|')
if (whichSeries and rootDir) or (whichSeries and fullShowPath and len(series_pieces) > 1):
if len(series_pieces) < 6:
logger.log("Unable to add show due to show selection. Not anough arguments: %s" % (repr(series_pieces)),
logger.ERROR)
ui.notifications.error("Unknown error. Unable to add show due to problem with show selection.")
return self.redirect('/home/addShows/existingShows/')
indexer = int(series_pieces[1])
indexer_id = int(series_pieces[3])
show_name = series_pieces[4]
else:
# if no indexer was provided use the default indexer set in General settings
if not providedIndexer:
providedIndexer = sickbeard.INDEXER_DEFAULT
indexer = int(providedIndexer)
indexer_id = int(whichSeries)
show_name = os.path.basename(os.path.normpath(fullShowPath))
# use the whole path if it's given, or else append the show name to the root dir to get the full show path
if fullShowPath:
show_dir = ek.ek(os.path.normpath, fullShowPath)
else:
show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name))
# blanket policy - if the dir exists you should have used "add existing show" numbnuts
if ek.ek(os.path.isdir, show_dir) and not fullShowPath:
ui.notifications.error("Unable to add show", "Folder " + show_dir + " exists already")
return self.redirect('/home/addShows/existingShows/')
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"Skipping initial creation of " + show_dir + " due to config.ini setting")
else:
dir_exists = helpers.makeDir(show_dir)
if not dir_exists:
logger.log(u"Unable to create the folder " + show_dir + ", can't add the show", logger.ERROR)
ui.notifications.error("Unable to add show",
"Unable to create the folder " + show_dir + ", can't add the show")
return self.redirect("/home/")
else:
helpers.chmodAsParent(show_dir)
# prepare the inputs for passing along
scene = config.checkbox_to_value(scene)
anime = config.checkbox_to_value(anime)
flatten_folders = config.checkbox_to_value(flatten_folders)
subtitles = config.checkbox_to_value(subtitles)
if not anyQualities:
anyQualities = []
if not bestQualities:
bestQualities = []
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
# add the show
sickbeard.showQueueScheduler.action.addShow(indexer, indexer_id, show_dir, int(defaultStatus), newQuality,
flatten_folders, indexerLang, subtitles, anime,
scene)
ui.notifications.message('Show added', 'Adding the specified show into ' + show_dir)
return finishAddShow()
def split_extra_show(self, extra_show):
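# extra_show is pipe-delimited: "indexer|show_dir[|indexer_id|show_name]"; the show name may itself contain pipes, so everything after the third pipe is rejoined.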
if not extra_show:
return (None, None, None, None)
split_vals = extra_show.split('|')
if len(split_vals) < 4:
indexer = split_vals[0]
show_dir = split_vals[1]
return (indexer, show_dir, None, None)
indexer = split_vals[0]
show_dir = split_vals[1]
indexer_id = split_vals[2]
show_name = '|'.join(split_vals[3:])
return (indexer, show_dir, indexer_id, show_name)
def addExistingShows(self, shows_to_add=None, promptForSettings=None):
"""
Receives a dir list and add them. Adds the ones with given TVDB IDs first, then forwards
along to the newShow page.
"""
# grab a list of other shows to add, if provided
if not shows_to_add:
shows_to_add = []
elif type(shows_to_add) != list:
shows_to_add = [shows_to_add]
shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add]
promptForSettings = config.checkbox_to_value(promptForSettings)
indexer_id_given = []
dirs_only = []
# separate all the ones with Indexer IDs
for cur_dir in shows_to_add:
if '|' in cur_dir:
split_vals = cur_dir.split('|')
if len(split_vals) < 3:
dirs_only.append(cur_dir)
if '|' not in cur_dir:
dirs_only.append(cur_dir)
else:
indexer, show_dir, indexer_id, show_name = self.split_extra_show(cur_dir)
if not show_dir or not indexer_id or not show_name:
continue
indexer_id_given.append((int(indexer), show_dir, int(indexer_id), show_name))
# if they want me to prompt for settings then I will just carry on to the newShow page
if promptForSettings and shows_to_add:
return self.newShow(shows_to_add[0], shows_to_add[1:])
# if they don't want me to prompt for settings then I can just add all the nfo shows now
num_added = 0
for cur_show in indexer_id_given:
indexer, show_dir, indexer_id, show_name = cur_show
if indexer is not None and indexer_id is not None:
# add the show
sickbeard.showQueueScheduler.action.addShow(indexer, indexer_id, show_dir,
default_status=sickbeard.STATUS_DEFAULT,
quality=sickbeard.QUALITY_DEFAULT,
flatten_folders=sickbeard.FLATTEN_FOLDERS_DEFAULT,
subtitles=sickbeard.SUBTITLES_DEFAULT,
anime=sickbeard.ANIME_DEFAULT,
scene=sickbeard.SCENE_DEFAULT)
num_added += 1
if num_added:
ui.notifications.message("Shows Added",
"Automatically added " + str(num_added) + " from their existing metadata files")
# if we're done then go home
if not dirs_only:
return self.redirect('/home/')
# for the remaining shows we need to prompt for each one, so forward this on to the newShow page
return self.newShow(dirs_only[0], dirs_only[1:])
@route('/manage(/?.*)')
class Manage(Home, WebRoot):
def __init__(self, *args, **kwargs):
super(Manage, self).__init__(*args, **kwargs)
def ManageMenu(self):
menu = [
{'title': 'Backlog Overview', 'path': 'manage/backlogOverview/'},
{'title': 'Manage Searches', 'path': 'manage/manageSearches/'},
{'title': 'Episode Status Management', 'path': 'manage/episodeStatuses/'}, ]
if sickbeard.USE_TORRENTS and sickbeard.TORRENT_METHOD != 'blackhole' \
and (sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'https'
or not sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'http:'):
menu.append({'title': 'Manage Torrents', 'path': 'manage/manageTorrents/'})
if sickbeard.USE_SUBTITLES:
menu.append({'title': 'Missed Subtitle Management', 'path': 'manage/subtitleMissed/'})
if sickbeard.USE_FAILED_DOWNLOADS:
menu.append({'title': 'Failed Downloads', 'path': 'manage/failedDownloads/'})
return menu
def index(self):
t = PageTemplate(rh=self, file="manage.tmpl")
t.submenu = self.ManageMenu()
return t.respond()
def showEpisodeStatuses(self, indexer_id, whichStatus):
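# Return a season -> episode -> name mapping of this show's episodes that currently have the requested status.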
status_list = [int(whichStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER
myDB = db.DBConnection()
cur_show_results = myDB.select(
"SELECT season, episode, name FROM tv_episodes WHERE showid = ? AND season != 0 AND status IN (" + ','.join(
['?'] * len(status_list)) + ")", [int(indexer_id)] + status_list)
result = {}
for cur_result in cur_show_results:
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
result[cur_season][cur_episode] = cur_result["name"]
return json.dumps(result)
def episodeStatuses(self, whichStatus=None):
if whichStatus:
whichStatus = int(whichStatus)
status_list = [whichStatus]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER
else:
status_list = []
t = PageTemplate(rh=self, file="manage_episodeStatuses.tmpl")
t.submenu = self.ManageMenu()
t.whichStatus = whichStatus
# if we have no status then this is as far as we need to go
if not status_list:
return t.respond()
myDB = db.DBConnection()
status_results = myDB.select(
"SELECT show_name, tv_shows.indexer_id AS indexer_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN (" + ','.join(
['?'] * len(
status_list)) + ") AND season != 0 AND tv_episodes.showid = tv_shows.indexer_id ORDER BY show_name",
status_list)
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
cur_indexer_id = int(cur_status_result["indexer_id"])
if cur_indexer_id not in ep_counts:
ep_counts[cur_indexer_id] = 1
else:
ep_counts[cur_indexer_id] += 1
show_names[cur_indexer_id] = cur_status_result["show_name"]
if cur_indexer_id not in sorted_show_ids:
sorted_show_ids.append(cur_indexer_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return t.respond()
def changeEpisodeStatuses(self, oldStatus, newStatus, *args, **kwargs):
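# Checkbox names arrive as "<indexer_id>-<season>x<episode>" (or "<indexer_id>-all"); collect the checked ones per show and pass them to setStatus.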
status_list = [int(oldStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER
to_change = {}
# make a list of all shows and their associated args
for arg in kwargs:
indexer_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if indexer_id not in to_change:
to_change[indexer_id] = []
to_change[indexer_id].append(what)
myDB = db.DBConnection()
for cur_indexer_id in to_change:
# get a list of all the eps we want to change if they just said "all"
if 'all' in to_change[cur_indexer_id]:
all_eps_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE status IN (" + ','.join(
['?'] * len(status_list)) + ") AND season != 0 AND showid = ?",
status_list + [cur_indexer_id])
all_eps = [str(x["season"]) + 'x' + str(x["episode"]) for x in all_eps_results]
to_change[cur_indexer_id] = all_eps
self.setStatus(cur_indexer_id, '|'.join(to_change[cur_indexer_id]), newStatus, direct=True)
return self.redirect('/manage/episodeStatuses/')
def showSubtitleMissed(self, indexer_id, whichSubs):
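        # Return a JSON mapping of {season: {episode: {name, subtitles}}} for this show's
        # downloaded episodes (composite status ending in 4) that still lack the requested
        # subtitle language; 'all' lists episodes missing at least one wanted language.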
myDB = db.DBConnection()
cur_show_results = myDB.select(
"SELECT season, episode, name, subtitles FROM tv_episodes WHERE showid = ? AND season != 0 AND status LIKE '%4'",
[int(indexer_id)])
result = {}
for cur_result in cur_show_results:
if whichSubs == 'all':
if len(set(cur_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(
subtitles.wantedLanguages()):
continue
elif whichSubs in cur_result["subtitles"].split(','):
continue
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
if cur_episode not in result[cur_season]:
result[cur_season][cur_episode] = {}
result[cur_season][cur_episode]["name"] = cur_result["name"]
result[cur_season][cur_episode]["subtitles"] = ",".join(
subliminal.language.Language(subtitle).alpha2 for subtitle in cur_result["subtitles"].split(',')) if not \
cur_result["subtitles"] == '' else ''
return json.dumps(result)
def subtitleMissed(self, whichSubs=None):
t = PageTemplate(rh=self, file="manage_subtitleMissed.tmpl")
t.submenu = self.ManageMenu()
t.whichSubs = whichSubs
if not whichSubs:
return t.respond()
myDB = db.DBConnection()
status_results = myDB.select(
"SELECT show_name, tv_shows.indexer_id as indexer_id, tv_episodes.subtitles subtitles FROM tv_episodes, tv_shows WHERE tv_shows.subtitles = 1 AND tv_episodes.status LIKE '%4' AND tv_episodes.season != 0 AND tv_episodes.showid = tv_shows.indexer_id ORDER BY show_name")
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
if whichSubs == 'all':
if len(set(cur_status_result["subtitles"].split(',')).intersection(
set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_status_result["subtitles"].split(','):
continue
cur_indexer_id = int(cur_status_result["indexer_id"])
if cur_indexer_id not in ep_counts:
ep_counts[cur_indexer_id] = 1
else:
ep_counts[cur_indexer_id] += 1
show_names[cur_indexer_id] = cur_status_result["show_name"]
if cur_indexer_id not in sorted_show_ids:
sorted_show_ids.append(cur_indexer_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return t.respond()
def downloadSubtitleMissed(self, *args, **kwargs):
to_download = {}
# make a list of all shows and their associated args
for arg in kwargs:
indexer_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if indexer_id not in to_download:
to_download[indexer_id] = []
to_download[indexer_id].append(what)
for cur_indexer_id in to_download:
# get a list of all the eps we want to download subtitles if they just said "all"
if 'all' in to_download[cur_indexer_id]:
myDB = db.DBConnection()
all_eps_results = myDB.select(
"SELECT season, episode FROM tv_episodes WHERE status LIKE '%4' AND season != 0 AND showid = ?",
[cur_indexer_id])
to_download[cur_indexer_id] = [str(x["season"]) + 'x' + str(x["episode"]) for x in all_eps_results]
for epResult in to_download[cur_indexer_id]:
season, episode = epResult.split('x')
show = sickbeard.helpers.findCertainShow(sickbeard.showList, int(cur_indexer_id))
subtitles = show.getEpisode(int(season), int(episode)).downloadSubtitles()
return self.redirect('/manage/subtitleMissed/')
def backlogShow(self, indexer_id):
show_obj = helpers.findCertainShow(sickbeard.showList, int(indexer_id))
if show_obj:
sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj])
return self.redirect("/manage/backlogOverview/")
def backlogOverview(self):
t = PageTemplate(rh=self, file="manage_backlogOverview.tmpl")
t.submenu = self.ManageMenu()
showCounts = {}
showCats = {}
showSQLResults = {}
myDB = db.DBConnection()
for curShow in sickbeard.showList:
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
sqlResults = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[curShow.indexerid])
for curResult in sqlResults:
curEpCat = curShow.getOverview(int(curResult["status"] or -1))
if curEpCat:
epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
showCounts[curShow.indexerid] = epCounts
showCats[curShow.indexerid] = epCats
showSQLResults[curShow.indexerid] = sqlResults
t.showCounts = showCounts
t.showCats = showCats
t.showSQLResults = showSQLResults
return t.respond()
def massEdit(self, toEdit=None):
t = PageTemplate(rh=self, file="manage_massEdit.tmpl")
t.submenu = self.ManageMenu()
if not toEdit:
return self.redirect("/manage/")
showIDs = toEdit.split("|")
showList = []
for curID in showIDs:
curID = int(curID)
showObj = helpers.findCertainShow(sickbeard.showList, curID)
if showObj:
showList.append(showObj)
archive_firstmatch_all_same = True
last_archive_firstmatch = None
flatten_folders_all_same = True
last_flatten_folders = None
paused_all_same = True
last_paused = None
default_ep_status_all_same = True
last_default_ep_status = None
anime_all_same = True
last_anime = None
sports_all_same = True
last_sports = None
quality_all_same = True
last_quality = None
subtitles_all_same = True
last_subtitles = None
scene_all_same = True
last_scene = None
air_by_date_all_same = True
last_air_by_date = None
root_dir_list = []
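        # Walk the selected shows: collect their root dirs and, for each editable setting,
        # track whether every show shares the same value so the form can be pre-filled
        # (a mixed value is presented as blank).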
for curShow in showList:
cur_root_dir = ek.ek(os.path.dirname, curShow._location)
if cur_root_dir not in root_dir_list:
root_dir_list.append(cur_root_dir)
if archive_firstmatch_all_same:
# if we had a value already and this value is different then they're not all the same
if last_archive_firstmatch not in (None, curShow.archive_firstmatch):
archive_firstmatch_all_same = False
else:
last_archive_firstmatch = curShow.archive_firstmatch
# if we know they're not all the same then no point even bothering
if paused_all_same:
# if we had a value already and this value is different then they're not all the same
if last_paused not in (None, curShow.paused):
paused_all_same = False
else:
last_paused = curShow.paused
if default_ep_status_all_same:
if last_default_ep_status not in (None, curShow.default_ep_status):
default_ep_status_all_same = False
else:
last_default_ep_status = curShow.default_ep_status
if anime_all_same:
# if we had a value already and this value is different then they're not all the same
if last_anime not in (None, curShow.is_anime):
anime_all_same = False
else:
last_anime = curShow.anime
if flatten_folders_all_same:
if last_flatten_folders not in (None, curShow.flatten_folders):
flatten_folders_all_same = False
else:
last_flatten_folders = curShow.flatten_folders
if quality_all_same:
if last_quality not in (None, curShow.quality):
quality_all_same = False
else:
last_quality = curShow.quality
if subtitles_all_same:
if last_subtitles not in (None, curShow.subtitles):
subtitles_all_same = False
else:
last_subtitles = curShow.subtitles
if scene_all_same:
if last_scene not in (None, curShow.scene):
scene_all_same = False
else:
last_scene = curShow.scene
if sports_all_same:
if last_sports not in (None, curShow.sports):
sports_all_same = False
else:
last_sports = curShow.sports
if air_by_date_all_same:
if last_air_by_date not in (None, curShow.air_by_date):
air_by_date_all_same = False
else:
last_air_by_date = curShow.air_by_date
t.showList = toEdit
t.archive_firstmatch_value = last_archive_firstmatch if archive_firstmatch_all_same else None
t.default_ep_status_value = last_default_ep_status if default_ep_status_all_same else None
t.paused_value = last_paused if paused_all_same else None
t.anime_value = last_anime if anime_all_same else None
t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None
t.quality_value = last_quality if quality_all_same else None
t.subtitles_value = last_subtitles if subtitles_all_same else None
t.scene_value = last_scene if scene_all_same else None
t.sports_value = last_sports if sports_all_same else None
t.air_by_date_value = last_air_by_date if air_by_date_all_same else None
t.root_dir_list = root_dir_list
return t.respond()
def massEditSubmit(self, archive_firstmatch=None, paused=None, default_ep_status=None,
anime=None, sports=None, scene=None, flatten_folders=None, quality_preset=False,
subtitles=None, air_by_date=None, anyQualities=[], bestQualities=[], toEdit=None, *args,
**kwargs):
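        # kwargs include paired 'orig_root_dir_N' / 'new_root_dir_N' fields from the form;
        # map each original root dir to its replacement so shows can be relocated.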
dir_map = {}
for cur_arg in kwargs:
if not cur_arg.startswith('orig_root_dir_'):
continue
which_index = cur_arg.replace('orig_root_dir_', '')
end_dir = kwargs['new_root_dir_' + which_index]
dir_map[kwargs[cur_arg]] = end_dir
showIDs = toEdit.split("|")
errors = []
for curShow in showIDs:
curErrors = []
showObj = helpers.findCertainShow(sickbeard.showList, int(curShow))
if not showObj:
continue
cur_root_dir = ek.ek(os.path.dirname, showObj._location)
cur_show_dir = ek.ek(os.path.basename, showObj._location)
if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir)
logger.log(
u"For show " + showObj.name + " changing dir from " + showObj._location + " to " + new_show_dir)
else:
new_show_dir = showObj._location
if archive_firstmatch == 'keep':
new_archive_firstmatch = showObj.archive_firstmatch
else:
new_archive_firstmatch = True if archive_firstmatch == 'enable' else False
new_archive_firstmatch = 'on' if new_archive_firstmatch else 'off'
if paused == 'keep':
new_paused = showObj.paused
else:
new_paused = True if paused == 'enable' else False
new_paused = 'on' if new_paused else 'off'
if default_ep_status == 'keep':
new_default_ep_status = showObj.default_ep_status
else:
new_default_ep_status = default_ep_status
if anime == 'keep':
new_anime = showObj.anime
else:
new_anime = True if anime == 'enable' else False
new_anime = 'on' if new_anime else 'off'
if sports == 'keep':
new_sports = showObj.sports
else:
new_sports = True if sports == 'enable' else False
new_sports = 'on' if new_sports else 'off'
if scene == 'keep':
new_scene = showObj.is_scene
else:
new_scene = True if scene == 'enable' else False
new_scene = 'on' if new_scene else 'off'
if air_by_date == 'keep':
new_air_by_date = showObj.air_by_date
else:
new_air_by_date = True if air_by_date == 'enable' else False
new_air_by_date = 'on' if new_air_by_date else 'off'
if flatten_folders == 'keep':
new_flatten_folders = showObj.flatten_folders
else:
new_flatten_folders = True if flatten_folders == 'enable' else False
new_flatten_folders = 'on' if new_flatten_folders else 'off'
if subtitles == 'keep':
new_subtitles = showObj.subtitles
else:
new_subtitles = True if subtitles == 'enable' else False
new_subtitles = 'on' if new_subtitles else 'off'
if quality_preset == 'keep':
anyQualities, bestQualities = Quality.splitQuality(showObj.quality)
exceptions_list = []
curErrors += self.editShow(curShow, new_show_dir, anyQualities,
bestQualities, exceptions_list,
defaultEpStatus=new_default_ep_status,
archive_firstmatch=new_archive_firstmatch,
flatten_folders=new_flatten_folders,
paused=new_paused, sports=new_sports,
subtitles=new_subtitles, anime=new_anime,
scene=new_scene, air_by_date=new_air_by_date,
directCall=True)
if curErrors:
logger.log(u"Errors: " + str(curErrors), logger.ERROR)
errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join(
['<li>%s</li>' % error for error in curErrors]) + "</ul>")
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
" ".join(errors))
return self.redirect("/manage/")
def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toRemove=None, toMetadata=None,
toSubtitle=None):
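        # Each to* argument is a pipe-separated list of show IDs selected for that action;
        # deleted or removed shows skip any other queued work.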
if toUpdate is not None:
toUpdate = toUpdate.split('|')
else:
toUpdate = []
if toRefresh is not None:
toRefresh = toRefresh.split('|')
else:
toRefresh = []
if toRename is not None:
toRename = toRename.split('|')
else:
toRename = []
if toSubtitle is not None:
toSubtitle = toSubtitle.split('|')
else:
toSubtitle = []
if toDelete is not None:
toDelete = toDelete.split('|')
else:
toDelete = []
if toRemove is not None:
toRemove = toRemove.split('|')
else:
toRemove = []
if toMetadata is not None:
toMetadata = toMetadata.split('|')
else:
toMetadata = []
errors = []
refreshes = []
updates = []
renames = []
subtitles = []
for curShowID in set(toUpdate + toRefresh + toRename + toSubtitle + toDelete + toRemove + toMetadata):
if curShowID == '':
continue
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID))
if showObj is None:
continue
if curShowID in toDelete:
showObj.deleteShow(True)
# don't do anything else if it's being deleted
continue
if curShowID in toRemove:
showObj.deleteShow()
                # don't do anything else if it's being removed
continue
if curShowID in toUpdate:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True)
updates.append(showObj.name)
except exceptions.CantUpdateException, e:
errors.append("Unable to update show " + showObj.name + ": " + ex(e))
# don't bother refreshing shows that were updated anyway
if curShowID in toRefresh and curShowID not in toUpdate:
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj)
refreshes.append(showObj.name)
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh show " + showObj.name + ": " + ex(e))
if curShowID in toRename:
sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj)
renames.append(showObj.name)
if curShowID in toSubtitle:
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj)
subtitles.append(showObj.name)
if len(errors) > 0:
ui.notifications.error("Errors encountered",
                                   '<br />\n'.join(errors))
messageDetail = ""
if len(updates) > 0:
messageDetail += "<br /><b>Updates</b><br /><ul><li>"
messageDetail += "</li><li>".join(updates)
messageDetail += "</li></ul>"
if len(refreshes) > 0:
messageDetail += "<br /><b>Refreshes</b><br /><ul><li>"
messageDetail += "</li><li>".join(refreshes)
messageDetail += "</li></ul>"
if len(renames) > 0:
messageDetail += "<br /><b>Renames</b><br /><ul><li>"
messageDetail += "</li><li>".join(renames)
messageDetail += "</li></ul>"
if len(subtitles) > 0:
messageDetail += "<br /><b>Subtitles</b><br /><ul><li>"
messageDetail += "</li><li>".join(subtitles)
messageDetail += "</li></ul>"
if len(updates + refreshes + renames + subtitles) > 0:
ui.notifications.message("The following actions were queued:",
messageDetail)
return self.redirect("/manage/")
def manageTorrents(self):
t = PageTemplate(rh=self, file="manage_torrents.tmpl")
t.info_download_station = ''
t.submenu = self.ManageMenu()
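        # Work out the torrent client's web UI URL: replace 'localhost' with a LAN-reachable
        # address so the page can be embedded, then append client-specific paths and credentials.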
if re.search('localhost', sickbeard.TORRENT_HOST):
if sickbeard.LOCALHOST_IP == '':
t.webui_url = re.sub('localhost', helpers.get_lan_ip(), sickbeard.TORRENT_HOST)
else:
t.webui_url = re.sub('localhost', sickbeard.LOCALHOST_IP, sickbeard.TORRENT_HOST)
else:
t.webui_url = sickbeard.TORRENT_HOST
if sickbeard.TORRENT_METHOD == 'utorrent':
t.webui_url = '/'.join(s.strip('/') for s in (t.webui_url, 'gui/'))
if sickbeard.TORRENT_METHOD == 'download_station':
if helpers.check_url(t.webui_url + 'download/'):
t.webui_url = t.webui_url + 'download/'
else:
t.info_download_station = '<p>To have a better experience please set the Download Station alias as <code>download</code>, you can check this setting in the Synology DSM <b>Control Panel</b> > <b>Application Portal</b>. Make sure you allow DSM to be embedded with iFrames too in <b>Control Panel</b> > <b>DSM Settings</b> > <b>Security</b>.</p><br/><p>There is more information about this available <a href="https://github.com/midgetspy/Sick-Beard/pull/338">here</a>.</p><br/>'
        if sickbeard.TORRENT_USERNAME != "" and sickbeard.TORRENT_PASSWORD != "":
            t.webui_url = re.sub('://', '://' + str(sickbeard.TORRENT_USERNAME) + ':' + str(sickbeard.TORRENT_PASSWORD) + '@', t.webui_url)
return t.respond()
def failedDownloads(self, limit=100, toRemove=None):
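        # limit == '0' shows the entire failed-download history; toRemove is a pipe-separated
        # list of release names to delete before rendering the page.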
myDB = db.DBConnection('failed.db')
if limit == "0":
sqlResults = myDB.select("SELECT * FROM failed")
else:
sqlResults = myDB.select("SELECT * FROM failed LIMIT ?", [limit])
toRemove = toRemove.split("|") if toRemove is not None else []
for release in toRemove:
myDB.action("DELETE FROM failed WHERE failed.release = ?", [release])
if toRemove:
return self.redirect('/manage/failedDownloads/')
t = PageTemplate(rh=self, file="manage_failedDownloads.tmpl")
t.failedResults = sqlResults
t.limit = limit
t.submenu = self.ManageMenu()
return t.respond()
@route('/manage/manageSearches(/?.*)')
class ManageSearches(Manage):
def __init__(self, *args, **kwargs):
super(ManageSearches, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="manage_manageSearches.tmpl")
# t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused()
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress()
t.dailySearchStatus = sickbeard.dailySearchScheduler.action.amActive
t.findPropersStatus = sickbeard.properFinderScheduler.action.amActive
t.queueLength = sickbeard.searchQueueScheduler.action.queue_length()
t.submenu = self.ManageMenu()
return t.respond()
def forceBacklog(self):
# force it to run the next time it looks
result = sickbeard.backlogSearchScheduler.forceRun()
if result:
logger.log(u"Backlog search forced")
ui.notifications.message('Backlog search started')
return self.redirect("/manage/manageSearches/")
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.dailySearchScheduler.forceRun()
if result:
logger.log(u"Daily search forced")
ui.notifications.message('Daily search started')
return self.redirect("/manage/manageSearches/")
def forceFindPropers(self):
# force it to run the next time it looks
result = sickbeard.properFinderScheduler.forceRun()
if result:
logger.log(u"Find propers search forced")
ui.notifications.message('Find propers search started')
return self.redirect("/manage/manageSearches/")
def pauseBacklog(self, paused=None):
if paused == "1":
sickbeard.searchQueueScheduler.action.pause_backlog()
else:
sickbeard.searchQueueScheduler.action.unpause_backlog()
return self.redirect("/manage/manageSearches/")
@route('/history(/?.*)')
class History(WebRoot):
def __init__(self, *args, **kwargs):
super(History, self).__init__(*args, **kwargs)
def index(self, limit=100):
# sqlResults = myDB.select("SELECT h.*, show_name, name FROM history h, tv_shows s, tv_episodes e WHERE h.showid=s.indexer_id AND h.showid=e.showid AND h.season=e.season AND h.episode=e.episode ORDER BY date DESC LIMIT "+str(numPerPage*(p-1))+", "+str(numPerPage))
myDB = db.DBConnection()
if limit == "0":
sqlResults = myDB.select(
"SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.indexer_id ORDER BY date DESC")
else:
sqlResults = myDB.select(
"SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.indexer_id ORDER BY date DESC LIMIT ?",
[limit])
history = {'show_id': 0, 'season': 0, 'episode': 0, 'quality': 0,
'actions': [{'time': '', 'action': '', 'provider': ''}]}
compact = []
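        # Collapse repeated history rows for the same show/season/episode/quality into one
        # entry carrying a list of actions (snatch, download, ...) for the compact view.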
for sql_result in sqlResults:
if not any((history['show_id'] == sql_result['showid']
and history['season'] == sql_result['season']
and history['episode'] == sql_result['episode']
and history['quality'] == sql_result['quality'])
for history in compact):
history = {}
history['show_id'] = sql_result['showid']
history['season'] = sql_result['season']
history['episode'] = sql_result['episode']
history['quality'] = sql_result['quality']
history['show_name'] = sql_result['show_name']
history['resource'] = sql_result['resource']
action = {}
history['actions'] = []
action['time'] = sql_result['date']
action['action'] = sql_result['action']
action['provider'] = sql_result['provider']
action['resource'] = sql_result['resource']
history['actions'].append(action)
history['actions'].sort(key=lambda x: x['time'])
compact.append(history)
else:
                index = [i for i, entry in enumerate(compact)
                         if entry['show_id'] == sql_result['showid']
                         and entry['season'] == sql_result['season']
                         and entry['episode'] == sql_result['episode']
                         and entry['quality'] == sql_result['quality']][0]
action = {}
history = compact[index]
action['time'] = sql_result['date']
action['action'] = sql_result['action']
action['provider'] = sql_result['provider']
action['resource'] = sql_result['resource']
history['actions'].append(action)
history['actions'].sort(key=lambda x: x['time'], reverse=True)
t = PageTemplate(rh=self, file="history.tmpl")
t.historyResults = sqlResults
t.compactResults = compact
t.limit = limit
t.submenu = [
{'title': 'Clear History', 'path': 'history/clearHistory'},
{'title': 'Trim History', 'path': 'history/trimHistory'},
]
return t.respond()
def clearHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE 1=1")
ui.notifications.message('History cleared')
return self.redirect("/history/")
def trimHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE date < " + str(
(datetime.datetime.today() - datetime.timedelta(days=30)).strftime(history.dateFormat)))
ui.notifications.message('Removed history entries greater than 30 days old')
return self.redirect("/history/")
@route('/config(/?.*)')
class Config(WebRoot):
def __init__(self, *args, **kwargs):
super(Config, self).__init__(*args, **kwargs)
def ConfigMenu(self):
menu = [
{'title': 'General', 'path': 'config/general/'},
{'title': 'Backup/Restore', 'path': 'config/backuprestore/'},
{'title': 'Search Settings', 'path': 'config/search/'},
{'title': 'Search Providers', 'path': 'config/providers/'},
{'title': 'Subtitles Settings', 'path': 'config/subtitles/'},
{'title': 'Post Processing', 'path': 'config/postProcessing/'},
{'title': 'Notifications', 'path': 'config/notifications/'},
{'title': 'Anime', 'path': 'config/anime/'},
]
return menu
def index(self):
t = PageTemplate(rh=self, file="config.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
@route('/config/general(/?.*)')
class ConfigGeneral(Config):
def __init__(self, *args, **kwargs):
super(ConfigGeneral, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_general.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def generateApiKey(self):
return helpers.generateApiKey()
def saveRootDirs(self, rootDirString=None):
sickbeard.ROOT_DIRS = rootDirString
def saveAddShowDefaults(self, defaultStatus, anyQualities, bestQualities, defaultFlattenFolders, subtitles=False,
anime=False, scene=False):
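        # anyQualities/bestQualities arrive as comma-separated quality values from the form
        # and are combined into a single quality mask used as the default for new shows.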
if anyQualities:
anyQualities = anyQualities.split(',')
else:
anyQualities = []
if bestQualities:
bestQualities = bestQualities.split(',')
else:
bestQualities = []
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
sickbeard.STATUS_DEFAULT = int(defaultStatus)
sickbeard.QUALITY_DEFAULT = int(newQuality)
sickbeard.FLATTEN_FOLDERS_DEFAULT = config.checkbox_to_value(defaultFlattenFolders)
sickbeard.SUBTITLES_DEFAULT = config.checkbox_to_value(subtitles)
sickbeard.ANIME_DEFAULT = config.checkbox_to_value(anime)
sickbeard.SCENE_DEFAULT = config.checkbox_to_value(scene)
sickbeard.save_config()
    def saveGeneral(self, log_dir=None, log_nr=5, log_size=1048576, web_port=None, web_log=None, encryption_version=None, web_ipv6=None,
update_shows_on_start=None, update_shows_on_snatch=None, trash_remove_show=None, trash_rotate_logs=None, update_frequency=None,
launch_browser=None, showupdate_hour=3, web_username=None,
api_key=None, indexer_default=None, timezone_display=None, cpu_preset=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None,
handle_reverse_proxy=None, sort_article=None, auto_update=None, notify_on_update=None,
proxy_setting=None, proxy_indexers=None, anon_redirect=None, git_path=None, git_remote=None,
calendar_unprotected=None, no_restart=None,
display_filesize=None, fuzzy_dating=None, trim_zero=None, date_preset=None, date_preset_na=None, time_preset=None,
indexer_timeout=None, play_videos=None, download_url=None, rootDir=None, theme_name=None,
git_reset=None, git_username=None, git_password=None, git_autoissues=None):
results = []
# Misc
sickbeard.PLAY_VIDEOS = config.checkbox_to_value(play_videos)
sickbeard.DOWNLOAD_URL = download_url
sickbeard.LAUNCH_BROWSER = config.checkbox_to_value(launch_browser)
sickbeard.SHOWUPDATE_HOUR = config.to_int(showupdate_hour)
config.change_VERSION_NOTIFY(config.checkbox_to_value(version_notify))
sickbeard.AUTO_UPDATE = config.checkbox_to_value(auto_update)
sickbeard.NOTIFY_ON_UPDATE = config.checkbox_to_value(notify_on_update)
# sickbeard.LOG_DIR is set in config.change_LOG_DIR()
sickbeard.LOG_NR = log_nr
sickbeard.LOG_SIZE = log_size
sickbeard.UPDATE_SHOWS_ON_START = config.checkbox_to_value(update_shows_on_start)
sickbeard.UPDATE_SHOWS_ON_SNATCH = config.checkbox_to_value(update_shows_on_snatch)
sickbeard.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show)
sickbeard.TRASH_ROTATE_LOGS = config.checkbox_to_value(trash_rotate_logs)
config.change_UPDATE_FREQUENCY(update_frequency)
sickbeard.LAUNCH_BROWSER = config.checkbox_to_value(launch_browser)
sickbeard.SORT_ARTICLE = config.checkbox_to_value(sort_article)
sickbeard.CPU_PRESET = cpu_preset
sickbeard.ANON_REDIRECT = anon_redirect
sickbeard.PROXY_SETTING = proxy_setting
sickbeard.PROXY_INDEXERS = config.checkbox_to_value(proxy_indexers)
sickbeard.GIT_USERNAME = git_username
sickbeard.GIT_PASSWORD = git_password
sickbeard.GIT_RESET = config.checkbox_to_value(git_reset)
sickbeard.GIT_AUTOISSUES = config.checkbox_to_value(git_autoissues)
sickbeard.GIT_PATH = git_path
sickbeard.GIT_REMOTE = git_remote
sickbeard.CALENDAR_UNPROTECTED = config.checkbox_to_value(calendar_unprotected)
sickbeard.NO_RESTART = config.checkbox_to_value(no_restart)
# sickbeard.LOG_DIR is set in config.change_LOG_DIR()
sickbeard.WEB_PORT = config.to_int(web_port)
sickbeard.WEB_IPV6 = config.checkbox_to_value(web_ipv6)
# sickbeard.WEB_LOG is set in config.change_LOG_DIR()
sickbeard.ENCRYPTION_VERSION = config.checkbox_to_value(encryption_version)
sickbeard.WEB_USERNAME = web_username
sickbeard.WEB_PASSWORD = web_password
sickbeard.DISPLAY_FILESIZE = config.checkbox_to_value(display_filesize)
sickbeard.FUZZY_DATING = config.checkbox_to_value(fuzzy_dating)
sickbeard.TRIM_ZERO = config.checkbox_to_value(trim_zero)
if date_preset:
sickbeard.DATE_PRESET = date_preset
discarded_na_data = date_preset_na
if indexer_default:
sickbeard.INDEXER_DEFAULT = config.to_int(indexer_default)
if indexer_timeout:
sickbeard.INDEXER_TIMEOUT = config.to_int(indexer_timeout)
if time_preset:
sickbeard.TIME_PRESET_W_SECONDS = time_preset
sickbeard.TIME_PRESET = sickbeard.TIME_PRESET_W_SECONDS.replace(u":%S", u"")
sickbeard.TIMEZONE_DISPLAY = timezone_display
if not config.change_LOG_DIR(log_dir, web_log):
results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log directory not changed."]
sickbeard.API_KEY = api_key
sickbeard.ENABLE_HTTPS = config.checkbox_to_value(enable_https)
if not config.change_HTTPS_CERT(https_cert):
results += [
"Unable to create directory " + os.path.normpath(https_cert) + ", https cert directory not changed."]
if not config.change_HTTPS_KEY(https_key):
results += [
"Unable to create directory " + os.path.normpath(https_key) + ", https key directory not changed."]
sickbeard.HANDLE_REVERSE_PROXY = config.checkbox_to_value(handle_reverse_proxy)
sickbeard.THEME_NAME = theme_name
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/general/")
@route('/config/backuprestore(/?.*)')
class ConfigBackupRestore(Config):
def __init__(self, *args, **kwargs):
super(ConfigBackupRestore, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_backuprestore.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def backup(self, backupDir=None):
finalResult = ''
if backupDir:
source = [os.path.join(sickbeard.DATA_DIR, 'sickbeard.db'), sickbeard.CONFIG_FILE]
source.append(os.path.join(sickbeard.DATA_DIR, 'failed.db'))
source.append(os.path.join(sickbeard.DATA_DIR, 'cache.db'))
target = os.path.join(backupDir, 'sickrage-' + time.strftime('%Y%m%d%H%M%S') + '.zip')
for (path, dirs, files) in os.walk(sickbeard.CACHE_DIR, topdown=True):
                for dirname in dirs[:]:  # iterate over a copy so pruning dirs does not skip entries
if path == sickbeard.CACHE_DIR and dirname not in ['images']:
dirs.remove(dirname)
for filename in files:
source.append(os.path.join(path, filename))
if helpers.backupConfigZip(source, target, sickbeard.DATA_DIR):
finalResult += "Successful backup to " + target
else:
finalResult += "Backup FAILED"
else:
finalResult += "You need to choose a folder to save your backup to!"
finalResult += "<br />\n"
return finalResult
def restore(self, backupFile=None):
finalResult = ''
if backupFile:
source = backupFile
target_dir = os.path.join(sickbeard.DATA_DIR, 'restore')
if helpers.restoreConfigZip(source, target_dir):
finalResult += "Successfully extracted restore files to " + target_dir
finalResult += "<br>Restart sickrage to complete the restore."
else:
finalResult += "Restore FAILED"
else:
finalResult += "You need to select a backup file to restore!"
finalResult += "<br />\n"
return finalResult
@route('/config/search(/?.*)')
class ConfigSearch(Config):
def __init__(self, *args, **kwargs):
super(ConfigSearch, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_search.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_category_anime=None, sab_host=None, nzbget_username=None,
nzbget_password=None, nzbget_category=None, nzbget_category_anime=None, nzbget_priority=None,
nzbget_host=None, nzbget_use_https=None, backlog_days=None, backlog_frequency=None,
dailysearch_frequency=None, nzb_method=None, torrent_method=None, usenet_retention=None,
download_propers=None, check_propers_interval=None, allow_high_priority=None, sab_forced=None,
randomize_providers=None, backlog_startup=None, use_failed_downloads=None, delete_failed=None,
dailysearch_startup=None, torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None,
torrent_label=None, torrent_label_anime=None, torrent_path=None, torrent_verify_cert=None,
torrent_seed_time=None, torrent_paused=None, torrent_high_bandwidth=None, coming_eps_missed_range=None,
                   torrent_rpcurl=None, torrent_auth_type=None, ignore_words=None, require_words=None):
results = []
if not config.change_NZB_DIR(nzb_dir):
results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."]
if not config.change_TORRENT_DIR(torrent_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
config.change_DAILYSEARCH_FREQUENCY(dailysearch_frequency)
config.change_BACKLOG_FREQUENCY(backlog_frequency)
sickbeard.BACKLOG_DAYS = config.to_int(backlog_days, default=7)
        sickbeard.COMING_EPS_MISSED_RANGE = config.to_int(coming_eps_missed_range, default=7)
sickbeard.USE_NZBS = config.checkbox_to_value(use_nzbs)
sickbeard.USE_TORRENTS = config.checkbox_to_value(use_torrents)
sickbeard.NZB_METHOD = nzb_method
sickbeard.TORRENT_METHOD = torrent_method
sickbeard.USENET_RETENTION = config.to_int(usenet_retention, default=500)
sickbeard.IGNORE_WORDS = ignore_words if ignore_words else ""
sickbeard.REQUIRE_WORDS = require_words if require_words else ""
sickbeard.RANDOMIZE_PROVIDERS = config.checkbox_to_value(randomize_providers)
sickbeard.DOWNLOAD_PROPERS = config.checkbox_to_value(download_propers)
config.change_DOWNLOAD_PROPERS(sickbeard.DOWNLOAD_PROPERS)
if sickbeard.DOWNLOAD_PROPERS and not sickbeard.properFinderScheduler.isAlive():
sickbeard.properFinderScheduler.silent = False
try:
sickbeard.properFinderScheduler.start()
except:
pass
elif not sickbeard.DOWNLOAD_PROPERS:
sickbeard.properFinderScheduler.stop.set()
sickbeard.properFinderScheduler.silent = True
try:
sickbeard.properFinderScheduler.join(5)
except:
pass
sickbeard.CHECK_PROPERS_INTERVAL = check_propers_interval
sickbeard.ALLOW_HIGH_PRIORITY = config.checkbox_to_value(allow_high_priority)
sickbeard.DAILYSEARCH_STARTUP = config.checkbox_to_value(dailysearch_startup)
sickbeard.BACKLOG_STARTUP = config.checkbox_to_value(backlog_startup)
sickbeard.USE_FAILED_DOWNLOADS = config.checkbox_to_value(use_failed_downloads)
sickbeard.DELETE_FAILED = config.checkbox_to_value(delete_failed)
sickbeard.SAB_USERNAME = sab_username
sickbeard.SAB_PASSWORD = sab_password
sickbeard.SAB_APIKEY = sab_apikey.strip()
sickbeard.SAB_CATEGORY = sab_category
sickbeard.SAB_CATEGORY_ANIME = sab_category_anime
sickbeard.SAB_HOST = config.clean_url(sab_host)
sickbeard.SAB_FORCED = config.checkbox_to_value(sab_forced)
sickbeard.NZBGET_USERNAME = nzbget_username
sickbeard.NZBGET_PASSWORD = nzbget_password
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_CATEGORY_ANIME = nzbget_category_anime
sickbeard.NZBGET_HOST = config.clean_host(nzbget_host)
sickbeard.NZBGET_USE_HTTPS = config.checkbox_to_value(nzbget_use_https)
sickbeard.NZBGET_PRIORITY = config.to_int(nzbget_priority, default=100)
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
sickbeard.TORRENT_LABEL = torrent_label
sickbeard.TORRENT_LABEL_ANIME = torrent_label_anime
sickbeard.TORRENT_VERIFY_CERT = config.checkbox_to_value(torrent_verify_cert)
sickbeard.TORRENT_PATH = torrent_path
sickbeard.TORRENT_SEED_TIME = torrent_seed_time
sickbeard.TORRENT_PAUSED = config.checkbox_to_value(torrent_paused)
sickbeard.TORRENT_HIGH_BANDWIDTH = config.checkbox_to_value(torrent_high_bandwidth)
sickbeard.TORRENT_HOST = config.clean_url(torrent_host)
sickbeard.TORRENT_RPCURL = torrent_rpcurl
sickbeard.TORRENT_AUTH_TYPE = torrent_auth_type
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/search/")
@route('/config/postProcessing(/?.*)')
class ConfigPostProcessing(Config):
def __init__(self, *args, **kwargs):
super(ConfigPostProcessing, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_postProcessing.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None,
kodi_data=None, kodi_12plus_data=None, mediabrowser_data=None, sony_ps3_data=None,
wdtv_data=None, tivo_data=None, mede8er_data=None,
keep_processed_dir=None, process_method=None, del_rar_contents=None, process_automatically=None,
no_delete=None, rename_episodes=None, airdate_episodes=None, unpack=None,
move_associated_files=None, sync_files=None, postpone_if_sync_files=None, nfo_rename=None,
tv_download_dir=None, naming_custom_abd=None,
naming_anime=None,
naming_abd_pattern=None, naming_strip_year=None, use_failed_downloads=None,
delete_failed=None, extra_scripts=None, skip_removed_files=None,
naming_custom_sports=None, naming_sports_pattern=None,
naming_custom_anime=None, naming_anime_pattern=None, naming_anime_multi_ep=None,
autopostprocesser_frequency=None):
results = []
if not config.change_TV_DOWNLOAD_DIR(tv_download_dir):
results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."]
sickbeard.PROCESS_AUTOMATICALLY = config.checkbox_to_value(process_automatically)
config.change_AUTOPOSTPROCESSER_FREQUENCY(autopostprocesser_frequency)
if sickbeard.PROCESS_AUTOMATICALLY and not sickbeard.autoPostProcesserScheduler.isAlive():
sickbeard.autoPostProcesserScheduler.silent = False
try:
sickbeard.autoPostProcesserScheduler.start()
except:
pass
elif not sickbeard.PROCESS_AUTOMATICALLY:
sickbeard.autoPostProcesserScheduler.stop.set()
sickbeard.autoPostProcesserScheduler.silent = True
try:
sickbeard.autoPostProcesserScheduler.join(5)
except:
pass
if unpack:
if self.isRarSupported() != 'not supported':
sickbeard.UNPACK = config.checkbox_to_value(unpack)
else:
sickbeard.UNPACK = 0
results.append("Unpacking Not Supported, disabling unpack setting")
else:
sickbeard.UNPACK = config.checkbox_to_value(unpack)
sickbeard.NO_DELETE = config.checkbox_to_value(no_delete)
sickbeard.KEEP_PROCESSED_DIR = config.checkbox_to_value(keep_processed_dir)
sickbeard.PROCESS_METHOD = process_method
sickbeard.DELRARCONTENTS = config.checkbox_to_value(del_rar_contents)
sickbeard.EXTRA_SCRIPTS = [x.strip() for x in extra_scripts.split('|') if x.strip()]
sickbeard.RENAME_EPISODES = config.checkbox_to_value(rename_episodes)
sickbeard.AIRDATE_EPISODES = config.checkbox_to_value(airdate_episodes)
sickbeard.MOVE_ASSOCIATED_FILES = config.checkbox_to_value(move_associated_files)
sickbeard.SYNC_FILES = sync_files
sickbeard.POSTPONE_IF_SYNC_FILES = config.checkbox_to_value(postpone_if_sync_files)
sickbeard.NAMING_CUSTOM_ABD = config.checkbox_to_value(naming_custom_abd)
sickbeard.NAMING_CUSTOM_SPORTS = config.checkbox_to_value(naming_custom_sports)
sickbeard.NAMING_CUSTOM_ANIME = config.checkbox_to_value(naming_custom_anime)
sickbeard.NAMING_STRIP_YEAR = config.checkbox_to_value(naming_strip_year)
sickbeard.USE_FAILED_DOWNLOADS = config.checkbox_to_value(use_failed_downloads)
sickbeard.DELETE_FAILED = config.checkbox_to_value(delete_failed)
sickbeard.SKIP_REMOVED_FILES = config.checkbox_to_value(skip_removed_files)
sickbeard.NFO_RENAME = config.checkbox_to_value(nfo_rename)
sickbeard.METADATA_KODI = kodi_data
sickbeard.METADATA_KODI_12PLUS = kodi_12plus_data
sickbeard.METADATA_MEDIABROWSER = mediabrowser_data
sickbeard.METADATA_PS3 = sony_ps3_data
sickbeard.METADATA_WDTV = wdtv_data
sickbeard.METADATA_TIVO = tivo_data
sickbeard.METADATA_MEDE8ER = mede8er_data
sickbeard.metadata_provider_dict['KODI'].set_config(sickbeard.METADATA_KODI)
sickbeard.metadata_provider_dict['KODI 12+'].set_config(sickbeard.METADATA_KODI_12PLUS)
sickbeard.metadata_provider_dict['MediaBrowser'].set_config(sickbeard.METADATA_MEDIABROWSER)
sickbeard.metadata_provider_dict['Sony PS3'].set_config(sickbeard.METADATA_PS3)
sickbeard.metadata_provider_dict['WDTV'].set_config(sickbeard.METADATA_WDTV)
sickbeard.metadata_provider_dict['TIVO'].set_config(sickbeard.METADATA_TIVO)
sickbeard.metadata_provider_dict['Mede8er'].set_config(sickbeard.METADATA_MEDE8ER)
if self.isNamingValid(naming_pattern, naming_multi_ep, anime_type=naming_anime) != "invalid":
sickbeard.NAMING_PATTERN = naming_pattern
sickbeard.NAMING_MULTI_EP = int(naming_multi_ep)
sickbeard.NAMING_ANIME = int(naming_anime)
sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
else:
if int(naming_anime) in [1, 2]:
results.append("You tried saving an invalid anime naming config, not saving your naming settings")
else:
results.append("You tried saving an invalid naming config, not saving your naming settings")
if self.isNamingValid(naming_anime_pattern, naming_anime_multi_ep, anime_type=naming_anime) != "invalid":
sickbeard.NAMING_ANIME_PATTERN = naming_anime_pattern
sickbeard.NAMING_ANIME_MULTI_EP = int(naming_anime_multi_ep)
sickbeard.NAMING_ANIME = int(naming_anime)
sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
else:
if int(naming_anime) in [1, 2]:
results.append("You tried saving an invalid anime naming config, not saving your naming settings")
else:
results.append("You tried saving an invalid naming config, not saving your naming settings")
if self.isNamingValid(naming_abd_pattern, None, abd=True) != "invalid":
sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern
else:
results.append(
"You tried saving an invalid air-by-date naming config, not saving your air-by-date settings")
if self.isNamingValid(naming_sports_pattern, None, sports=True) != "invalid":
sickbeard.NAMING_SPORTS_PATTERN = naming_sports_pattern
else:
results.append(
"You tried saving an invalid sports naming config, not saving your sports settings")
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/postProcessing/")
def testNaming(self, pattern=None, multi=None, abd=False, sports=False, anime_type=None):
if multi is not None:
multi = int(multi)
if anime_type is not None:
anime_type = int(anime_type)
result = naming.test_name(pattern, multi, abd, sports, anime_type)
result = ek.ek(os.path.join, result['dir'], result['name'])
return result
def isNamingValid(self, pattern=None, multi=None, abd=False, sports=False, anime_type=None):
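        # Returns 'valid', 'seasonfolders' (valid, but the pattern requires season folders),
        # or 'invalid'.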
if pattern is None:
return "invalid"
if multi is not None:
multi = int(multi)
if anime_type is not None:
anime_type = int(anime_type)
# air by date shows just need one check, we don't need to worry about season folders
if abd:
is_valid = naming.check_valid_abd_naming(pattern)
require_season_folders = False
# sport shows just need one check, we don't need to worry about season folders
elif sports:
is_valid = naming.check_valid_sports_naming(pattern)
require_season_folders = False
else:
# check validity of single and multi ep cases for the whole path
is_valid = naming.check_valid_naming(pattern, multi, anime_type)
# check validity of single and multi ep cases for only the file name
require_season_folders = naming.check_force_season_folders(pattern, multi, anime_type)
if is_valid and not require_season_folders:
return "valid"
elif is_valid and require_season_folders:
return "seasonfolders"
else:
return "invalid"
def isRarSupported(self):
"""
Test Packing Support:
- Simulating in memory rar extraction on test.rar file
"""
try:
rar_path = os.path.join(sickbeard.PROG_DIR, 'lib', 'unrar2', 'test.rar')
testing = RarFile(rar_path).read_files('*test.txt')
if testing[0][1] == 'This is only a test.':
return 'supported'
logger.log(u'Rar Not Supported: Can not read the content of test file', logger.ERROR)
return 'not supported'
except Exception, e:
logger.log(u'Rar Not Supported: ' + ex(e), logger.ERROR)
return 'not supported'
@route('/config/providers(/?.*)')
class ConfigProviders(Config):
def __init__(self, *args, **kwargs):
super(ConfigProviders, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_providers.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def canAddNewznabProvider(self, name):
if not name:
return json.dumps({'error': 'No Provider Name specified'})
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
tempProvider = newznab.NewznabProvider(name, '')
if tempProvider.getID() in providerDict:
return json.dumps({'error': 'Provider Name already exists as ' + providerDict[tempProvider.getID()].name})
else:
return json.dumps({'success': tempProvider.getID()})
def saveNewznabProvider(self, name, url, key=''):
if not name or not url:
return '0'
providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if name in providerDict:
if not providerDict[name].default:
providerDict[name].name = name
providerDict[name].url = config.clean_url(url)
providerDict[name].key = key
# a 0 in the key spot indicates that no key is needed
if key == '0':
providerDict[name].needs_auth = False
else:
providerDict[name].needs_auth = True
return providerDict[name].getID() + '|' + providerDict[name].configStr()
else:
newProvider = newznab.NewznabProvider(name, url, key=key)
sickbeard.newznabProviderList.append(newProvider)
return newProvider.getID() + '|' + newProvider.configStr()
def getNewznabCategories(self, name, url, key):
'''
Retrieves a list of possible categories with category id's
Using the default url/api?cat
http://yournewznaburl.com/api?t=caps&apikey=yourapikey
'''
error = ""
success = False
if not name:
error += "\nNo Provider Name specified"
if not url:
error += "\nNo Provider Url specified"
if not key:
error += "\nNo Provider Api key specified"
        if error != "":
return json.dumps({'success': False, 'error': error})
# Get list with Newznabproviders
# providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
# Get newznabprovider obj with provided name
tempProvider = newznab.NewznabProvider(name, url, key)
success, tv_categories, error = tempProvider.get_newznab_categories()
return json.dumps({'success': success, 'tv_categories': tv_categories, 'error': error})
def deleteNewznabProvider(self, nnid):
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if nnid not in providerDict or providerDict[nnid].default:
return '0'
# delete it from the list
sickbeard.newznabProviderList.remove(providerDict[nnid])
if nnid in sickbeard.PROVIDER_ORDER:
sickbeard.PROVIDER_ORDER.remove(nnid)
return '1'
def canAddTorrentRssProvider(self, name, url, cookies, titleTAG):
if not name:
return json.dumps({'error': 'Invalid name specified'})
providerDict = dict(
zip([x.getID() for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList))
tempProvider = rsstorrent.TorrentRssProvider(name, url, cookies, titleTAG)
if tempProvider.getID() in providerDict:
return json.dumps({'error': 'Exists as ' + providerDict[tempProvider.getID()].name})
else:
(succ, errMsg) = tempProvider.validateRSS()
if succ:
return json.dumps({'success': tempProvider.getID()})
else:
return json.dumps({'error': errMsg})
def saveTorrentRssProvider(self, name, url, cookies, titleTAG):
if not name or not url:
return '0'
providerDict = dict(zip([x.name for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList))
if name in providerDict:
providerDict[name].name = name
providerDict[name].url = config.clean_url(url)
providerDict[name].cookies = cookies
providerDict[name].titleTAG = titleTAG
return providerDict[name].getID() + '|' + providerDict[name].configStr()
else:
newProvider = rsstorrent.TorrentRssProvider(name, url, cookies, titleTAG)
sickbeard.torrentRssProviderList.append(newProvider)
return newProvider.getID() + '|' + newProvider.configStr()
def deleteTorrentRssProvider(self, id):
providerDict = dict(
zip([x.getID() for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList))
if id not in providerDict:
return '0'
# delete it from the list
sickbeard.torrentRssProviderList.remove(providerDict[id])
if id in sickbeard.PROVIDER_ORDER:
sickbeard.PROVIDER_ORDER.remove(id)
return '1'
def saveProviders(self, newznab_string='', torrentrss_string='', provider_order=None, **kwargs):
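        # newznab_string and torrentrss_string are '!!!'-separated provider definitions
        # ('name|url|key|cats' and 'name|url|cookies|titleTAG'); provider_order is a
        # space-separated list of '<provider_id>:<enabled>' pairs.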
results = []
provider_str_list = provider_order.split()
provider_list = []
newznabProviderDict = dict(
zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
finishedNames = []
# add all the newznab info we got into our list
if newznab_string:
for curNewznabProviderStr in newznab_string.split('!!!'):
if not curNewznabProviderStr:
continue
cur_name, cur_url, cur_key, cur_cat = curNewznabProviderStr.split('|')
cur_url = config.clean_url(cur_url)
newProvider = newznab.NewznabProvider(cur_name, cur_url, key=cur_key)
cur_id = newProvider.getID()
# if it already exists then update it
if cur_id in newznabProviderDict:
newznabProviderDict[cur_id].name = cur_name
newznabProviderDict[cur_id].url = cur_url
newznabProviderDict[cur_id].key = cur_key
newznabProviderDict[cur_id].catIDs = cur_cat
# a 0 in the key spot indicates that no key is needed
if cur_key == '0':
newznabProviderDict[cur_id].needs_auth = False
else:
newznabProviderDict[cur_id].needs_auth = True
try:
newznabProviderDict[cur_id].search_mode = str(kwargs[cur_id + '_search_mode']).strip()
except:
pass
try:
newznabProviderDict[cur_id].search_fallback = config.checkbox_to_value(
kwargs[cur_id + '_search_fallback'])
except:
newznabProviderDict[cur_id].search_fallback = 0
try:
newznabProviderDict[cur_id].enable_daily = config.checkbox_to_value(
kwargs[cur_id + '_enable_daily'])
except:
newznabProviderDict[cur_id].enable_daily = 0
try:
newznabProviderDict[cur_id].enable_backlog = config.checkbox_to_value(
kwargs[cur_id + '_enable_backlog'])
except:
newznabProviderDict[cur_id].enable_backlog = 0
else:
sickbeard.newznabProviderList.append(newProvider)
finishedNames.append(cur_id)
# delete anything that is missing
for curProvider in sickbeard.newznabProviderList:
if curProvider.getID() not in finishedNames:
sickbeard.newznabProviderList.remove(curProvider)
torrentRssProviderDict = dict(
zip([x.getID() for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList))
finishedNames = []
if torrentrss_string:
for curTorrentRssProviderStr in torrentrss_string.split('!!!'):
if not curTorrentRssProviderStr:
continue
curName, curURL, curCookies, curTitleTAG = curTorrentRssProviderStr.split('|')
curURL = config.clean_url(curURL)
newProvider = rsstorrent.TorrentRssProvider(curName, curURL, curCookies, curTitleTAG)
curID = newProvider.getID()
# if it already exists then update it
if curID in torrentRssProviderDict:
torrentRssProviderDict[curID].name = curName
torrentRssProviderDict[curID].url = curURL
torrentRssProviderDict[curID].cookies = curCookies
                    torrentRssProviderDict[curID].titleTAG = curTitleTAG
else:
sickbeard.torrentRssProviderList.append(newProvider)
finishedNames.append(curID)
# delete anything that is missing
for curProvider in sickbeard.torrentRssProviderList:
if curProvider.getID() not in finishedNames:
sickbeard.torrentRssProviderList.remove(curProvider)
# do the enable/disable
for curProviderStr in provider_str_list:
curProvider, curEnabled = curProviderStr.split(':')
curEnabled = config.to_int(curEnabled)
curProvObj = [x for x in sickbeard.providers.sortedProviderList() if
x.getID() == curProvider and hasattr(x, 'enabled')]
if curProvObj:
curProvObj[0].enabled = bool(curEnabled)
provider_list.append(curProvider)
if curProvider in newznabProviderDict:
newznabProviderDict[curProvider].enabled = bool(curEnabled)
elif curProvider in torrentRssProviderDict:
torrentRssProviderDict[curProvider].enabled = bool(curEnabled)
# dynamically load provider settings
for curTorrentProvider in [curProvider for curProvider in sickbeard.providers.sortedProviderList() if
curProvider.providerType == sickbeard.GenericProvider.TORRENT]:
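            # Per-provider form fields arrive as '<provider_id>_<setting>' kwargs; the bare
            # except blocks fall back to defaults when a field (e.g. an unchecked checkbox)
            # was not posted.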
if hasattr(curTorrentProvider, 'minseed'):
try:
curTorrentProvider.minseed = int(str(kwargs[curTorrentProvider.getID() + '_minseed']).strip())
except:
curTorrentProvider.minseed = 0
if hasattr(curTorrentProvider, 'minleech'):
try:
curTorrentProvider.minleech = int(str(kwargs[curTorrentProvider.getID() + '_minleech']).strip())
except:
curTorrentProvider.minleech = 0
if hasattr(curTorrentProvider, 'ratio'):
try:
curTorrentProvider.ratio = str(kwargs[curTorrentProvider.getID() + '_ratio']).strip()
except:
curTorrentProvider.ratio = None
if hasattr(curTorrentProvider, 'digest'):
try:
curTorrentProvider.digest = str(kwargs[curTorrentProvider.getID() + '_digest']).strip()
except:
curTorrentProvider.digest = None
if hasattr(curTorrentProvider, 'hash'):
try:
curTorrentProvider.hash = str(kwargs[curTorrentProvider.getID() + '_hash']).strip()
except:
curTorrentProvider.hash = None
if hasattr(curTorrentProvider, 'api_key'):
try:
curTorrentProvider.api_key = str(kwargs[curTorrentProvider.getID() + '_api_key']).strip()
except:
curTorrentProvider.api_key = None
if hasattr(curTorrentProvider, 'username'):
try:
curTorrentProvider.username = str(kwargs[curTorrentProvider.getID() + '_username']).strip()
except:
curTorrentProvider.username = None
if hasattr(curTorrentProvider, 'password'):
try:
curTorrentProvider.password = str(kwargs[curTorrentProvider.getID() + '_password']).strip()
except:
curTorrentProvider.password = None
if hasattr(curTorrentProvider, 'passkey'):
try:
curTorrentProvider.passkey = str(kwargs[curTorrentProvider.getID() + '_passkey']).strip()
except:
curTorrentProvider.passkey = None
if hasattr(curTorrentProvider, 'confirmed'):
try:
curTorrentProvider.confirmed = config.checkbox_to_value(
kwargs[curTorrentProvider.getID() + '_confirmed'])
except:
curTorrentProvider.confirmed = 0
if hasattr(curTorrentProvider, 'proxy'):
try:
curTorrentProvider.proxy.enabled = config.checkbox_to_value(
kwargs[curTorrentProvider.getID() + '_proxy'])
except:
curTorrentProvider.proxy.enabled = 0
if hasattr(curTorrentProvider.proxy, 'url'):
try:
curTorrentProvider.proxy.url = str(kwargs[curTorrentProvider.getID() + '_proxy_url']).strip()
except:
curTorrentProvider.proxy.url = None
if hasattr(curTorrentProvider, 'freeleech'):
try:
curTorrentProvider.freeleech = config.checkbox_to_value(
kwargs[curTorrentProvider.getID() + '_freeleech'])
except:
curTorrentProvider.freeleech = 0
if hasattr(curTorrentProvider, 'search_mode'):
try:
curTorrentProvider.search_mode = str(kwargs[curTorrentProvider.getID() + '_search_mode']).strip()
except:
curTorrentProvider.search_mode = 'eponly'
if hasattr(curTorrentProvider, 'search_fallback'):
try:
curTorrentProvider.search_fallback = config.checkbox_to_value(
kwargs[curTorrentProvider.getID() + '_search_fallback'])
except:
curTorrentProvider.search_fallback = 0 # these exceptions are catching unselected checkboxes
if hasattr(curTorrentProvider, 'enable_daily'):
try:
curTorrentProvider.enable_daily = config.checkbox_to_value(
kwargs[curTorrentProvider.getID() + '_enable_daily'])
except:
curTorrentProvider.enable_daily = 0 # these exceptions are actually catching unselected checkboxes
if hasattr(curTorrentProvider, 'enable_backlog'):
try:
curTorrentProvider.enable_backlog = config.checkbox_to_value(
kwargs[curTorrentProvider.getID() + '_enable_backlog'])
except:
curTorrentProvider.enable_backlog = 0 # these exceptions are actually catching unselected checkboxes
if hasattr(curTorrentProvider, 'cat'):
try:
curTorrentProvider.cat = int(str(kwargs[curTorrentProvider.getID() + '_cat']).strip())
except:
curTorrentProvider.cat = 0
if hasattr(curTorrentProvider, 'subtitle'):
try:
curTorrentProvider.subtitle = config.checkbox_to_value(
kwargs[curTorrentProvider.getID() + '_subtitle'])
except:
curTorrentProvider.subtitle = 0
for curNzbProvider in [curProvider for curProvider in sickbeard.providers.sortedProviderList() if
curProvider.providerType == sickbeard.GenericProvider.NZB]:
if hasattr(curNzbProvider, 'api_key'):
try:
curNzbProvider.api_key = str(kwargs[curNzbProvider.getID() + '_api_key']).strip()
except:
curNzbProvider.api_key = None
if hasattr(curNzbProvider, 'username'):
try:
curNzbProvider.username = str(kwargs[curNzbProvider.getID() + '_username']).strip()
except:
curNzbProvider.username = None
if hasattr(curNzbProvider, 'search_mode'):
try:
curNzbProvider.search_mode = str(kwargs[curNzbProvider.getID() + '_search_mode']).strip()
except:
curNzbProvider.search_mode = 'eponly'
if hasattr(curNzbProvider, 'search_fallback'):
try:
curNzbProvider.search_fallback = config.checkbox_to_value(
kwargs[curNzbProvider.getID() + '_search_fallback'])
except:
curNzbProvider.search_fallback = 0 # these exceptions are actually catching unselected checkboxes
if hasattr(curNzbProvider, 'enable_daily'):
try:
curNzbProvider.enable_daily = config.checkbox_to_value(
kwargs[curNzbProvider.getID() + '_enable_daily'])
except:
curNzbProvider.enable_daily = 0 # these exceptions are actually catching unselected checkboxes
if hasattr(curNzbProvider, 'enable_backlog'):
try:
curNzbProvider.enable_backlog = config.checkbox_to_value(
kwargs[curNzbProvider.getID() + '_enable_backlog'])
except:
curNzbProvider.enable_backlog = 0 # these exceptions are actually catching unselected checkboxes
sickbeard.NEWZNAB_DATA = '!!!'.join([x.configStr() for x in sickbeard.newznabProviderList])
sickbeard.PROVIDER_ORDER = provider_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/providers/")
@route('/config/notifications(/?.*)')
class ConfigNotifications(Config):
def __init__(self, *args, **kwargs):
super(ConfigNotifications, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_notifications.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def saveNotifications(self, use_kodi=None, kodi_always_on=None, kodi_notify_onsnatch=None,
kodi_notify_ondownload=None,
kodi_notify_onsubtitledownload=None, kodi_update_onlyfirst=None,
kodi_update_library=None, kodi_update_full=None, kodi_host=None, kodi_username=None,
kodi_password=None,
use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None,
plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_server_token=None, plex_host=None, plex_username=None, plex_password=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None,
growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_freemobile=None, freemobile_notify_onsnatch=None, freemobile_notify_ondownload=None,
freemobile_notify_onsubtitledownload=None, freemobile_id=None, freemobile_apikey=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None,
prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None,
twitter_notify_onsubtitledownload=None,
use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None,
boxcar_notify_onsubtitledownload=None, boxcar_username=None,
use_boxcar2=None, boxcar2_notify_onsnatch=None, boxcar2_notify_ondownload=None,
boxcar2_notify_onsubtitledownload=None, boxcar2_accesstoken=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None,
pushover_notify_onsubtitledownload=None, pushover_userkey=None, pushover_apikey=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None,
libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
use_trakt=None, trakt_username=None, trakt_password=None,
trakt_remove_watchlist=None, trakt_sync_watchlist=None, trakt_method_add=None,
trakt_start_paused=None, trakt_use_recommended=None, trakt_sync=None,
trakt_default_indexer=None, trakt_remove_serieslist=None, trakt_disable_ssl_verify=None, trakt_timeout=None, trakt_blacklist_name=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None,
synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None,
pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None,
nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None,
pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None,
pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None,
pushbullet_device_list=None,
use_email=None, email_notify_onsnatch=None, email_notify_ondownload=None,
email_notify_onsubtitledownload=None, email_host=None, email_port=25, email_from=None,
email_tls=None, email_user=None, email_password=None, email_list=None, email_show_list=None,
email_show=None):
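# Copy each submitted form value onto the matching sickbeard setting, converting checkboxes with
# config.checkbox_to_value and sanitising host fields with config.clean_host / config.clean_hosts.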
results = []
sickbeard.USE_KODI = config.checkbox_to_value(use_kodi)
sickbeard.KODI_ALWAYS_ON = config.checkbox_to_value(kodi_always_on)
sickbeard.KODI_NOTIFY_ONSNATCH = config.checkbox_to_value(kodi_notify_onsnatch)
sickbeard.KODI_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(kodi_notify_ondownload)
sickbeard.KODI_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(kodi_notify_onsubtitledownload)
sickbeard.KODI_UPDATE_LIBRARY = config.checkbox_to_value(kodi_update_library)
sickbeard.KODI_UPDATE_FULL = config.checkbox_to_value(kodi_update_full)
sickbeard.KODI_UPDATE_ONLYFIRST = config.checkbox_to_value(kodi_update_onlyfirst)
sickbeard.KODI_HOST = config.clean_hosts(kodi_host)
sickbeard.KODI_USERNAME = kodi_username
sickbeard.KODI_PASSWORD = kodi_password
sickbeard.USE_PLEX = config.checkbox_to_value(use_plex)
sickbeard.PLEX_NOTIFY_ONSNATCH = config.checkbox_to_value(plex_notify_onsnatch)
sickbeard.PLEX_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(plex_notify_ondownload)
sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(plex_notify_onsubtitledownload)
sickbeard.PLEX_UPDATE_LIBRARY = config.checkbox_to_value(plex_update_library)
sickbeard.PLEX_HOST = config.clean_hosts(plex_host)
sickbeard.PLEX_SERVER_HOST = config.clean_host(plex_server_host)
sickbeard.PLEX_SERVER_TOKEN = config.clean_host(plex_server_token)
sickbeard.PLEX_USERNAME = plex_username
sickbeard.PLEX_PASSWORD = plex_password
sickbeard.USE_GROWL = config.checkbox_to_value(use_growl)
sickbeard.GROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(growl_notify_onsnatch)
sickbeard.GROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(growl_notify_ondownload)
sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(growl_notify_onsubtitledownload)
sickbeard.GROWL_HOST = config.clean_host(growl_host, default_port=23053)
sickbeard.GROWL_PASSWORD = growl_password
sickbeard.USE_FREEMOBILE = config.checkbox_to_value(use_freemobile)
sickbeard.FREEMOBILE_NOTIFY_ONSNATCH = config.checkbox_to_value(freemobile_notify_onsnatch)
sickbeard.FREEMOBILE_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(freemobile_notify_ondownload)
sickbeard.FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(freemobile_notify_onsubtitledownload)
sickbeard.FREEMOBILE_ID = freemobile_id
sickbeard.FREEMOBILE_APIKEY = freemobile_apikey
sickbeard.USE_PROWL = config.checkbox_to_value(use_prowl)
sickbeard.PROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(prowl_notify_onsnatch)
sickbeard.PROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(prowl_notify_ondownload)
sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(prowl_notify_onsubtitledownload)
sickbeard.PROWL_API = prowl_api
sickbeard.PROWL_PRIORITY = prowl_priority
sickbeard.USE_TWITTER = config.checkbox_to_value(use_twitter)
sickbeard.TWITTER_NOTIFY_ONSNATCH = config.checkbox_to_value(twitter_notify_onsnatch)
sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(twitter_notify_ondownload)
sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(twitter_notify_onsubtitledownload)
sickbeard.USE_BOXCAR = config.checkbox_to_value(use_boxcar)
sickbeard.BOXCAR_NOTIFY_ONSNATCH = config.checkbox_to_value(boxcar_notify_onsnatch)
sickbeard.BOXCAR_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(boxcar_notify_ondownload)
sickbeard.BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(boxcar_notify_onsubtitledownload)
sickbeard.BOXCAR_USERNAME = boxcar_username
sickbeard.USE_BOXCAR2 = config.checkbox_to_value(use_boxcar2)
sickbeard.BOXCAR2_NOTIFY_ONSNATCH = config.checkbox_to_value(boxcar2_notify_onsnatch)
sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(boxcar2_notify_ondownload)
sickbeard.BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(boxcar2_notify_onsubtitledownload)
sickbeard.BOXCAR2_ACCESSTOKEN = boxcar2_accesstoken
sickbeard.USE_PUSHOVER = config.checkbox_to_value(use_pushover)
sickbeard.PUSHOVER_NOTIFY_ONSNATCH = config.checkbox_to_value(pushover_notify_onsnatch)
sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushover_notify_ondownload)
sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushover_notify_onsubtitledownload)
sickbeard.PUSHOVER_USERKEY = pushover_userkey
sickbeard.PUSHOVER_APIKEY = pushover_apikey
sickbeard.USE_LIBNOTIFY = config.checkbox_to_value(use_libnotify)
sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = config.checkbox_to_value(libnotify_notify_onsnatch)
sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(libnotify_notify_ondownload)
sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(libnotify_notify_onsubtitledownload)
sickbeard.USE_NMJ = config.checkbox_to_value(use_nmj)
sickbeard.NMJ_HOST = config.clean_host(nmj_host)
sickbeard.NMJ_DATABASE = nmj_database
sickbeard.NMJ_MOUNT = nmj_mount
sickbeard.USE_NMJv2 = config.checkbox_to_value(use_nmjv2)
sickbeard.NMJv2_HOST = config.clean_host(nmjv2_host)
sickbeard.NMJv2_DATABASE = nmjv2_database
sickbeard.NMJv2_DBLOC = nmjv2_dbloc
sickbeard.USE_SYNOINDEX = config.checkbox_to_value(use_synoindex)
sickbeard.USE_SYNOLOGYNOTIFIER = config.checkbox_to_value(use_synologynotifier)
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = config.checkbox_to_value(synologynotifier_notify_onsnatch)
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(synologynotifier_notify_ondownload)
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(
synologynotifier_notify_onsubtitledownload)
sickbeard.USE_TRAKT = config.checkbox_to_value(use_trakt)
sickbeard.TRAKT_USERNAME = trakt_username
sickbeard.TRAKT_PASSWORD = trakt_password
sickbeard.TRAKT_REMOVE_WATCHLIST = config.checkbox_to_value(trakt_remove_watchlist)
sickbeard.TRAKT_REMOVE_SERIESLIST = config.checkbox_to_value(trakt_remove_serieslist)
sickbeard.TRAKT_SYNC_WATCHLIST = config.checkbox_to_value(trakt_sync_watchlist)
sickbeard.TRAKT_METHOD_ADD = int(trakt_method_add)
sickbeard.TRAKT_START_PAUSED = config.checkbox_to_value(trakt_start_paused)
sickbeard.TRAKT_USE_RECOMMENDED = config.checkbox_to_value(trakt_use_recommended)
sickbeard.TRAKT_SYNC = config.checkbox_to_value(trakt_sync)
sickbeard.TRAKT_DEFAULT_INDEXER = int(trakt_default_indexer)
sickbeard.TRAKT_DISABLE_SSL_VERIFY = config.checkbox_to_value(trakt_disable_ssl_verify)
sickbeard.TRAKT_TIMEOUT = int(trakt_timeout)
sickbeard.TRAKT_BLACKLIST_NAME = trakt_blacklist_name
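# Keep the Trakt checker quiet in the log while Trakt support is disabled.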
if sickbeard.USE_TRAKT:
sickbeard.traktCheckerScheduler.silent = False
else:
sickbeard.traktCheckerScheduler.silent = True
sickbeard.USE_EMAIL = config.checkbox_to_value(use_email)
sickbeard.EMAIL_NOTIFY_ONSNATCH = config.checkbox_to_value(email_notify_onsnatch)
sickbeard.EMAIL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(email_notify_ondownload)
sickbeard.EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(email_notify_onsubtitledownload)
sickbeard.EMAIL_HOST = config.clean_host(email_host)
sickbeard.EMAIL_PORT = config.to_int(email_port, default=25)
sickbeard.EMAIL_FROM = email_from
sickbeard.EMAIL_TLS = config.checkbox_to_value(email_tls)
sickbeard.EMAIL_USER = email_user
sickbeard.EMAIL_PASSWORD = email_password
sickbeard.EMAIL_LIST = email_list
sickbeard.USE_PYTIVO = config.checkbox_to_value(use_pytivo)
sickbeard.PYTIVO_NOTIFY_ONSNATCH = config.checkbox_to_value(pytivo_notify_onsnatch)
sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pytivo_notify_ondownload)
sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pytivo_notify_onsubtitledownload)
sickbeard.PYTIVO_UPDATE_LIBRARY = config.checkbox_to_value(pytivo_update_library)
sickbeard.PYTIVO_HOST = config.clean_host(pytivo_host)
sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name
sickbeard.USE_NMA = config.checkbox_to_value(use_nma)
sickbeard.NMA_NOTIFY_ONSNATCH = config.checkbox_to_value(nma_notify_onsnatch)
sickbeard.NMA_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(nma_notify_ondownload)
sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(nma_notify_onsubtitledownload)
sickbeard.NMA_API = nma_api
sickbeard.NMA_PRIORITY = nma_priority
sickbeard.USE_PUSHALOT = config.checkbox_to_value(use_pushalot)
sickbeard.PUSHALOT_NOTIFY_ONSNATCH = config.checkbox_to_value(pushalot_notify_onsnatch)
sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushalot_notify_ondownload)
sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushalot_notify_onsubtitledownload)
sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
sickbeard.USE_PUSHBULLET = config.checkbox_to_value(use_pushbullet)
sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = config.checkbox_to_value(pushbullet_notify_onsnatch)
sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushbullet_notify_ondownload)
sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushbullet_notify_onsubtitledownload)
sickbeard.PUSHBULLET_API = pushbullet_api
sickbeard.PUSHBULLET_DEVICE = pushbullet_device_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/notifications/")
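# Handler for /config/subtitles: renders the subtitle settings page and saves the subtitle search options.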
@route('/config/subtitles(/?.*)')
class ConfigSubtitles(Config):
def __init__(self, *args, **kwargs):
super(ConfigSubtitles, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_subtitles.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def saveSubtitles(self, use_subtitles=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None,
service_order=None, subtitles_history=None, subtitles_finder_frequency=None,
subtitles_multi=None):
results = []
if subtitles_finder_frequency == '' or subtitles_finder_frequency is None:
subtitles_finder_frequency = 1
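# Start the subtitle finder scheduler when subtitles are being enabled, or stop and join it when they are disabled.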
if use_subtitles == "on" and not sickbeard.subtitlesFinderScheduler.isAlive():
sickbeard.subtitlesFinderScheduler.silent = False
try:
sickbeard.subtitlesFinderScheduler.start()
except:
pass
elif use_subtitles != "on":
sickbeard.subtitlesFinderScheduler.stop.set()
sickbeard.subtitlesFinderScheduler.silent = True
try:
sickbeard.subtitlesFinderScheduler.join(5)
except:
pass
sickbeard.USE_SUBTITLES = config.checkbox_to_value(use_subtitles)
sickbeard.SUBTITLES_LANGUAGES = [lang.alpha2 for lang in subtitles.isValidLanguage(
subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else ''
sickbeard.SUBTITLES_DIR = subtitles_dir
sickbeard.SUBTITLES_HISTORY = config.checkbox_to_value(subtitles_history)
sickbeard.SUBTITLES_FINDER_FREQUENCY = config.to_int(subtitles_finder_frequency, default=1)
sickbeard.SUBTITLES_MULTI = config.checkbox_to_value(subtitles_multi)
# Subtitles services
services_str_list = service_order.split()
subtitles_services_list = []
subtitles_services_enabled = []
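# service_order arrives as space-separated "<service>:<enabled>" pairs; split each pair into the service name and its 0/1 flag.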
for curServiceStr in services_str_list:
curService, curEnabled = curServiceStr.split(':')
subtitles_services_list.append(curService)
subtitles_services_enabled.append(int(curEnabled))
sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list
sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/subtitles/")
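# Handler for /config/anime: renders the anime settings page and saves the AniDB and anime display options.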
@route('/config/anime(/?.*)')
class ConfigAnime(Config):
def __init__(self, *args, **kwargs):
super(ConfigAnime, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, file="config_anime.tmpl")
t.submenu = self.ConfigMenu()
return t.respond()
def saveAnime(self, use_anidb=None, anidb_username=None, anidb_password=None, anidb_use_mylist=None,
split_home=None):
results = []
sickbeard.USE_ANIDB = config.checkbox_to_value(use_anidb)
sickbeard.ANIDB_USERNAME = anidb_username
sickbeard.ANIDB_PASSWORD = anidb_password
sickbeard.ANIDB_USE_MYLIST = config.checkbox_to_value(anidb_use_mylist)
sickbeard.ANIME_SPLIT_HOME = config.checkbox_to_value(split_home)
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE))
return self.redirect("/config/anime/")
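# Handler for /errorlogs: error overview, raw log viewer and submission of logged errors as a GitHub issue.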
@route('/errorlogs(/?.*)')
class ErrorLogs(WebRoot):
def __init__(self, *args, **kwargs):
super(ErrorLogs, self).__init__(*args, **kwargs)
def ErrorLogsMenu(self):
menu = [
{'title': 'Clear Errors', 'path': 'errorlogs/clearerrors/'},
{'title': 'Submit Errors', 'path': 'errorlogs/submit_errors/', 'requires': self.haveErrors},
]
return menu
def index(self):
t = PageTemplate(rh=self, file="errorlogs.tmpl")
t.submenu = self.ErrorLogsMenu()
return t.respond()
def haveErrors(self):
return len(classes.ErrorViewer.errors) > 0
def clearerrors(self):
classes.ErrorViewer.clear()
return self.redirect("/errorlogs/")
def viewlog(self, minLevel=logger.INFO, logFilter="<NONE>", logSearch=None, maxLines=500):
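# Get_Data walks the log lines newest-first, keeping entries that match the search term or pass the
# level/name filters; it reads minLevel, logFilter, logSearch and maxLines from the enclosing scope
# (its own parameters of the same meaning go unused) and prefixes continuation lines such as
# tracebacks with "AA" so they stay attached to the preceding kept entry.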
def Get_Data(Levelmin, data_in, lines_in, regex, Filter, Search, mlines):
lastLine = False
numLines = lines_in
numToShow = min(maxLines, numLines + len(data_in))
finalData = []
for x in reversed(data_in):
x = ek.ss(x)
match = re.match(regex, x)
if match:
level = match.group(7)
logName = match.group(8)
if level not in logger.reverseNames:
lastLine = False
continue
if logSearch and logSearch.lower() in x.lower():
lastLine = True
finalData.append(x)
numLines += 1
elif not logSearch and logger.reverseNames[level] >= minLevel and (logFilter == '<NONE>' or logName.startswith(logFilter)):
lastLine = True
finalData.append(x)
numLines += 1
else:
lastLine = False
continue
elif lastLine:
finalData.append("AA" + x)
numLines += 1
if numLines >= numToShow:
return finalData
return finalData
t = PageTemplate(rh=self, file="viewlogs.tmpl")
t.submenu = self.ErrorLogsMenu()
minLevel = int(minLevel)
logNameFilters = {'<NONE>': u'<No Filter>',
'DAILYSEARCHER': u'Daily Searcher',
'BACKLOG': u'Backlog',
'SHOWUPDATER': u'Show Updater',
'CHECKVERSION': u'Check Version',
'SHOWQUEUE': u'Show Queue',
'SEARCHQUEUE': u'Search Queue',
'FINDPROPERS': u'Find Propers',
'POSTPROCESSER': u'Postprocesser',
'FINDSUBTITLES': u'Find Subtitles',
'TRAKTCHECKER': u'Trakt Checker',
'EVENT': u'Event',
'ERROR': u'Error',
'TORNADO': u'Tornado',
'Thread': u'Thread',
'MAIN': u'Main'
}
if logFilter not in logNameFilters:
logFilter = '<NONE>'
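# Each log line starts with "YYYY-MM-DD HH:MM:SS LEVEL source :: message"; groups 7, 8 and 9 capture the level, the source name and the message.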
regex = r"^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$"
data = []
if os.path.isfile(logger.logFile):
with ek.ek(codecs.open, logger.logFile, 'r', 'utf-8') as f:
data = Get_Data(minLevel, f.readlines(), 0, regex, logFilter, logSearch, maxLines)
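# Continue with the rotated log files (logFile.1, logFile.2, ...) until enough lines have been collected.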
for i in range(1, int(sickbeard.LOG_NR)):
if os.path.isfile(logger.logFile + "." + str(i)) and (len(data) <= maxLines):
with ek.ek(codecs.open, logger.logFile + "." + str(i), 'r', 'utf-8') as f:
data += Get_Data(minLevel, f.readlines(), len(data), regex, logFilter, logSearch, maxLines)
result = "".join(data)
t.logLines = result
t.minLevel = minLevel
t.logNameFilters = logNameFilters
t.logFilter = logFilter
t.logSearch = logSearch
return t.respond()
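# Submitting the collected errors as a GitHub issue requires a configured GitHub username and password.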
def submit_errors(self):
if not (sickbeard.GIT_USERNAME and sickbeard.GIT_PASSWORD):
ui.notifications.error("Missing information", "Please set your GitHub username and password in the config.")
logger.log(u'Please set your GitHub username and password in the config, unable to submit issue ticket to GitHub!')
else:
issue = logger.submit_errors()
if issue:
ui.notifications.message('Your issue ticket #%s was submitted successfully!' % issue.number)
return self.redirect("/errorlogs/")
| slightstone/SickRage | sickbeard/webserve.py | Python | gpl-3.0 | 214,679 |