repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
Jeff-Tian/mybnb | Python27/Lib/lib-tk/test/test_ttk/test_functions.py | 2 | 17810 | # -*- encoding: utf-8 -*-
import sys
import unittest
import ttk
class MockTkApp:
def splitlist(self, arg):
if isinstance(arg, tuple):
return arg
return arg.split(':')
def wantobjects(self):
return True
class MockTclObj(object):
typename = 'test'
def __init__(self, val):
self.val = val
def __str__(self):
return unicode(self.val)
class MockStateSpec(object):
typename = 'StateSpec'
def __init__(self, *args):
self.val = args
def __str__(self):
return ' '.join(self.val)
class InternalFunctionsTest(unittest.TestCase):
def test_format_optdict(self):
def check_against(fmt_opts, result):
for i in range(0, len(fmt_opts), 2):
self.assertEqual(result.pop(fmt_opts[i]), fmt_opts[i + 1])
if result:
self.fail("result still got elements: %s" % result)
# passing an empty dict should return an empty object (tuple here)
self.assertFalse(ttk._format_optdict({}))
# check list formatting
check_against(
ttk._format_optdict({'fg': 'blue', 'padding': [1, 2, 3, 4]}),
{'-fg': 'blue', '-padding': '1 2 3 4'})
# check tuple formatting (same as list)
check_against(
ttk._format_optdict({'test': (1, 2, '', 0)}),
{'-test': '1 2 {} 0'})
# check untouched values
check_against(
ttk._format_optdict({'test': {'left': 'as is'}}),
{'-test': {'left': 'as is'}})
# check script formatting
check_against(
ttk._format_optdict(
{'test': [1, -1, '', '2m', 0], 'test2': 3,
'test3': '', 'test4': 'abc def',
'test5': '"abc"', 'test6': '{}',
'test7': '} -spam {'}, script=True),
{'-test': '{1 -1 {} 2m 0}', '-test2': '3',
'-test3': '{}', '-test4': '{abc def}',
'-test5': '{"abc"}', '-test6': r'\{\}',
'-test7': r'\}\ -spam\ \{'})
opts = {u'αβγ': True, u'á': False}
orig_opts = opts.copy()
# check if giving unicode keys is fine
check_against(ttk._format_optdict(opts), {u'-αβγ': True, u'-á': False})
# opts should remain unchanged
self.assertEqual(opts, orig_opts)
# passing values with spaces inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('one two', 'three')}),
{'-option': '{one two} three'})
check_against(
ttk._format_optdict(
{'option': ('one\ttwo', 'three')}),
{'-option': '{one\ttwo} three'})
# passing empty strings inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('', 'one')}),
{'-option': '{} one'})
# passing values with braces inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('one} {two', 'three')}),
{'-option': r'one\}\ \{two three'})
# passing quoted strings inside a tuple/list
check_against(
ttk._format_optdict(
{'option': ('"one"', 'two')}),
{'-option': '{"one"} two'})
check_against(
ttk._format_optdict(
{'option': ('{one}', 'two')}),
{'-option': r'\{one\} two'})
# ignore an option
amount_opts = len(ttk._format_optdict(opts, ignore=(u'á'))) // 2
self.assertEqual(amount_opts, len(opts) - 1)
# ignore non-existing options
amount_opts = len(ttk._format_optdict(opts, ignore=(u'á', 'b'))) // 2
self.assertEqual(amount_opts, len(opts) - 1)
# ignore every option
self.assertFalse(ttk._format_optdict(opts, ignore=opts.keys()))
def test_format_mapdict(self):
opts = {'a': [('b', 'c', 'val'), ('d', 'otherval'), ('', 'single')]}
result = ttk._format_mapdict(opts)
self.assertEqual(len(result), len(opts.keys()) * 2)
self.assertEqual(result, ('-a', '{b c} val d otherval {} single'))
self.assertEqual(ttk._format_mapdict(opts, script=True),
('-a', '{{b c} val d otherval {} single}'))
self.assertEqual(ttk._format_mapdict({2: []}), ('-2', ''))
opts = {u'üñíćódè': [(u'á', u'vãl')]}
result = ttk._format_mapdict(opts)
self.assertEqual(result, (u'-üñíćódè', u'á vãl'))
# empty states
valid = {'opt': [('', u'', 'hi')]}
self.assertEqual(ttk._format_mapdict(valid), ('-opt', '{ } hi'))
# when passing multiple states, they all must be strings
invalid = {'opt': [(1, 2, 'valid val')]}
self.assertRaises(TypeError, ttk._format_mapdict, invalid)
invalid = {'opt': [([1], '2', 'valid val')]}
self.assertRaises(TypeError, ttk._format_mapdict, invalid)
# but when passing a single state, it can be anything
valid = {'opt': [[1, 'value']]}
self.assertEqual(ttk._format_mapdict(valid), ('-opt', '1 value'))
# special attention to single states which evaluate to False
for stateval in (None, 0, False, '', set()): # just some samples
valid = {'opt': [(stateval, 'value')]}
self.assertEqual(ttk._format_mapdict(valid),
('-opt', '{} value'))
# values must be iterable
opts = {'a': None}
self.assertRaises(TypeError, ttk._format_mapdict, opts)
# items in the value must have size >= 2
self.assertRaises(IndexError, ttk._format_mapdict,
{'a': [('invalid', )]})
def test_format_elemcreate(self):
self.assertTrue(ttk._format_elemcreate(None), (None, ()))
## Testing type = image
# image type expects at least an image name, so this should raise
# IndexError since it tries to access the index 0 of an empty tuple
self.assertRaises(IndexError, ttk._format_elemcreate, 'image')
# don't format returned values as a tcl script
# minimum acceptable for image type
self.assertEqual(ttk._format_elemcreate('image', False, 'test'),
("test ", ()))
# specifying a state spec
self.assertEqual(ttk._format_elemcreate('image', False, 'test',
('', 'a')), ("test {} a", ()))
# state spec with multiple states
self.assertEqual(ttk._format_elemcreate('image', False, 'test',
('a', 'b', 'c')), ("test {a b} c", ()))
# state spec and options
res = ttk._format_elemcreate('image', False, 'test',
('a', 'b'), a='x', b='y')
self.assertEqual(res[0], "test a b")
self.assertEqual(set(res[1]), {"-a", "x", "-b", "y"})
# format returned values as a tcl script
# state spec with multiple states and an option with a multivalue
self.assertEqual(ttk._format_elemcreate('image', True, 'test',
('a', 'b', 'c', 'd'), x=[2, 3]), ("{test {a b c} d}", "-x {2 3}"))
## Testing type = vsapi
# vsapi type expects at least a class name and a part_id, so this
# should raise a ValueError since it tries to get two elements from
# an empty tuple
self.assertRaises(ValueError, ttk._format_elemcreate, 'vsapi')
# don't format returned values as a tcl script
# minimum acceptable for vsapi
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b'),
("a b ", ()))
# now with a state spec with multiple states
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b',
('a', 'b', 'c')), ("a b {a b} c", ()))
# state spec and option
self.assertEqual(ttk._format_elemcreate('vsapi', False, 'a', 'b',
('a', 'b'), opt='x'), ("a b a b", ("-opt", "x")))
# format returned values as a tcl script
# state spec with a multivalue and an option
self.assertEqual(ttk._format_elemcreate('vsapi', True, 'a', 'b',
('a', 'b', [1, 2]), opt='x'), ("{a b {a b} {1 2}}", "-opt x"))
# Testing type = from
# from type expects at least a type name
self.assertRaises(IndexError, ttk._format_elemcreate, 'from')
self.assertEqual(ttk._format_elemcreate('from', False, 'a'),
('a', ()))
self.assertEqual(ttk._format_elemcreate('from', False, 'a', 'b'),
('a', ('b', )))
self.assertEqual(ttk._format_elemcreate('from', True, 'a', 'b'),
('{a}', 'b'))
def test_format_layoutlist(self):
def sample(indent=0, indent_size=2):
return ttk._format_layoutlist(
[('a', {'other': [1, 2, 3], 'children':
[('b', {'children':
[('c', {'children':
[('d', {'nice': 'opt'})], 'something': (1, 2)
})]
})]
})], indent=indent, indent_size=indent_size)[0]
def sample_expected(indent=0, indent_size=2):
spaces = lambda amount=0: ' ' * (amount + indent)
return (
"%sa -other {1 2 3} -children {\n"
"%sb -children {\n"
"%sc -something {1 2} -children {\n"
"%sd -nice opt\n"
"%s}\n"
"%s}\n"
"%s}" % (spaces(), spaces(indent_size),
spaces(2 * indent_size), spaces(3 * indent_size),
spaces(2 * indent_size), spaces(indent_size), spaces()))
# empty layout
self.assertEqual(ttk._format_layoutlist([])[0], '')
# smallest (after an empty one) acceptable layout
smallest = ttk._format_layoutlist([('a', None)], indent=0)
self.assertEqual(smallest,
ttk._format_layoutlist([('a', '')], indent=0))
self.assertEqual(smallest[0], 'a')
# testing indentation levels
self.assertEqual(sample(), sample_expected())
for i in range(4):
self.assertEqual(sample(i), sample_expected(i))
self.assertEqual(sample(i, i), sample_expected(i, i))
# invalid layout format, different kind of exceptions will be
# raised
# plain wrong format
self.assertRaises(ValueError, ttk._format_layoutlist,
['bad', 'format'])
self.assertRaises(TypeError, ttk._format_layoutlist, None)
# _format_layoutlist always expects the second item (in every item)
# to act like a dict (except when the value evaluates to False).
self.assertRaises(AttributeError,
ttk._format_layoutlist, [('a', 'b')])
# bad children formatting
self.assertRaises(ValueError, ttk._format_layoutlist,
[('name', {'children': {'a': None}})])
def test_script_from_settings(self):
# empty options
self.assertFalse(ttk._script_from_settings({'name':
{'configure': None, 'map': None, 'element create': None}}))
# empty layout
self.assertEqual(
ttk._script_from_settings({'name': {'layout': None}}),
"ttk::style layout name {\nnull\n}")
configdict = {u'αβγ': True, u'á': False}
self.assertTrue(
ttk._script_from_settings({'name': {'configure': configdict}}))
mapdict = {u'üñíćódè': [(u'á', u'vãl')]}
self.assertTrue(
ttk._script_from_settings({'name': {'map': mapdict}}))
# invalid image element
self.assertRaises(IndexError,
ttk._script_from_settings, {'name': {'element create': ['image']}})
# minimal valid image
self.assertTrue(ttk._script_from_settings({'name':
{'element create': ['image', 'name']}}))
image = {'thing': {'element create':
['image', 'name', ('state1', 'state2', 'val')]}}
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} ")
image['thing']['element create'].append({'opt': 30})
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} "
"-opt 30")
image['thing']['element create'][-1]['opt'] = [MockTclObj(3),
MockTclObj('2m')]
self.assertEqual(ttk._script_from_settings(image),
"ttk::style element create thing image {name {state1 state2} val} "
"-opt {3 2m}")
def test_tclobj_to_py(self):
self.assertEqual(
ttk._tclobj_to_py((MockStateSpec('a', 'b'), 'val')),
[('a', 'b', 'val')])
self.assertEqual(
ttk._tclobj_to_py([MockTclObj('1'), 2, MockTclObj('3m')]),
[1, 2, '3m'])
def test_list_from_statespec(self):
def test_it(sspec, value, res_value, states):
self.assertEqual(ttk._list_from_statespec(
(sspec, value)), [states + (res_value, )])
states_even = tuple('state%d' % i for i in range(6))
statespec = MockStateSpec(*states_even)
test_it(statespec, 'val', 'val', states_even)
test_it(statespec, MockTclObj('val'), 'val', states_even)
states_odd = tuple('state%d' % i for i in range(5))
statespec = MockStateSpec(*states_odd)
test_it(statespec, 'val', 'val', states_odd)
test_it(('a', 'b', 'c'), MockTclObj('val'), 'val', ('a', 'b', 'c'))
def test_list_from_layouttuple(self):
tk = MockTkApp()
# empty layout tuple
self.assertFalse(ttk._list_from_layouttuple(tk, ()))
# shortest layout tuple
self.assertEqual(ttk._list_from_layouttuple(tk, ('name', )),
[('name', {})])
# not so interesting ltuple
sample_ltuple = ('name', '-option', 'value')
self.assertEqual(ttk._list_from_layouttuple(tk, sample_ltuple),
[('name', {'option': 'value'})])
# empty children
self.assertEqual(ttk._list_from_layouttuple(tk,
('something', '-children', ())),
[('something', {'children': []})]
)
# more interesting ltuple
ltuple = (
'name', '-option', 'niceone', '-children', (
('otherone', '-children', (
('child', )), '-otheropt', 'othervalue'
)
)
)
self.assertEqual(ttk._list_from_layouttuple(tk, ltuple),
[('name', {'option': 'niceone', 'children':
[('otherone', {'otheropt': 'othervalue', 'children':
[('child', {})]
})]
})]
)
# bad tuples
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus'))
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('name', 'no_minus', 'value'))
self.assertRaises(ValueError, ttk._list_from_layouttuple, tk,
('something', '-children')) # no children
def test_val_or_dict(self):
def func(res, opt=None, val=None):
if opt is None:
return res
if val is None:
return "test val"
return (opt, val)
tk = MockTkApp()
tk.call = func
self.assertEqual(ttk._val_or_dict(tk, {}, '-test:3'),
{'test': '3'})
self.assertEqual(ttk._val_or_dict(tk, {}, ('-test', 3)),
{'test': 3})
self.assertEqual(ttk._val_or_dict(tk, {'test': None}, 'x:y'),
'test val')
self.assertEqual(ttk._val_or_dict(tk, {'test': 3}, 'x:y'),
{'test': 3})
def test_convert_stringval(self):
tests = (
(0, 0), ('09', 9), ('a', 'a'), (u'áÚ', u'áÚ'), ([], '[]'),
(None, 'None')
)
for orig, expected in tests:
self.assertEqual(ttk._convert_stringval(orig), expected)
if sys.getdefaultencoding() == 'ascii':
self.assertRaises(UnicodeDecodeError,
ttk._convert_stringval, 'á')
class TclObjsToPyTest(unittest.TestCase):
def test_unicode(self):
adict = {'opt': u'välúè'}
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': u'välúè'})
adict['opt'] = MockTclObj(adict['opt'])
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': u'välúè'})
def test_multivalues(self):
adict = {'opt': [1, 2, 3, 4]}
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': [1, 2, 3, 4]})
adict['opt'] = [1, 'xm', 3]
self.assertEqual(ttk.tclobjs_to_py(adict), {'opt': [1, 'xm', 3]})
adict['opt'] = (MockStateSpec('a', 'b'), u'válũè')
self.assertEqual(ttk.tclobjs_to_py(adict),
{'opt': [('a', 'b', u'válũè')]})
self.assertEqual(ttk.tclobjs_to_py({'x': ['y z']}),
{'x': ['y z']})
def test_nosplit(self):
self.assertEqual(ttk.tclobjs_to_py({'text': 'some text'}),
{'text': 'some text'})
tests_nogui = (InternalFunctionsTest, TclObjsToPyTest)
if __name__ == "__main__":
from test.test_support import run_unittest
run_unittest(*tests_nogui)
| apache-2.0 |
laurentb/weboob | modules/journaldesfemmes/module.py | 1 | 2124 | # -*- coding: utf-8 -*-
# Copyright(C) 2018 Phyks (Lucas Verney)
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.tools.backend import Module
from weboob.capabilities.recipe import CapRecipe, Recipe
from .browser import JournaldesfemmesBrowser
__all__ = ['JournaldesfemmesModule']
class JournaldesfemmesModule(Module, CapRecipe):
NAME = 'journaldesfemmes'
DESCRIPTION = 'journaldesfemmes website'
MAINTAINER = 'Phyks (Lucas Verney)'
EMAIL = '[email protected]'
LICENSE = 'AGPLv3+'
VERSION = '2.1'
BROWSER = JournaldesfemmesBrowser
def get_recipe(self, _id):
"""
Get a recipe object from an ID.
:param _id: ID of recipe
:type _id: str
:rtype: :class:`Recipe`
"""
return self.browser.get_recipe(_id)
def iter_recipes(self, pattern):
"""
Search recipes and iterate on results.
:param pattern: pattern to search
:type pattern: str
:rtype: iter[:class:`Recipe`]
"""
return self.browser.search_recipes(pattern)
def fill_recipe(self, recipe, fields):
if 'nb_person' in fields or 'instructions' in fields:
recipe = self.browser.get_recipe(recipe.id, recipe)
if 'comments' in fields:
recipe.comments = list(self.browser.get_comments(recipe.id))
return recipe
OBJECTS = {Recipe: fill_recipe}
| lgpl-3.0 |
TathagataChakraborti/resource-conflicts | PLANROB-2015/py2.5/lib/python2.5/lib-tk/Tkconstants.py | 375 | 1493 | # Symbolic constants for Tk
# Booleans
NO=FALSE=OFF=0
YES=TRUE=ON=1
# -anchor and -sticky
N='n'
S='s'
W='w'
E='e'
NW='nw'
SW='sw'
NE='ne'
SE='se'
NS='ns'
EW='ew'
NSEW='nsew'
CENTER='center'
# -fill
NONE='none'
X='x'
Y='y'
BOTH='both'
# -side
LEFT='left'
TOP='top'
RIGHT='right'
BOTTOM='bottom'
# -relief
RAISED='raised'
SUNKEN='sunken'
FLAT='flat'
RIDGE='ridge'
GROOVE='groove'
SOLID = 'solid'
# -orient
HORIZONTAL='horizontal'
VERTICAL='vertical'
# -tabs
NUMERIC='numeric'
# -wrap
CHAR='char'
WORD='word'
# -align
BASELINE='baseline'
# -bordermode
INSIDE='inside'
OUTSIDE='outside'
# Special tags, marks and insert positions
SEL='sel'
SEL_FIRST='sel.first'
SEL_LAST='sel.last'
END='end'
INSERT='insert'
CURRENT='current'
ANCHOR='anchor'
ALL='all' # e.g. Canvas.delete(ALL)
# Text widget and button states
NORMAL='normal'
DISABLED='disabled'
ACTIVE='active'
# Canvas state
HIDDEN='hidden'
# Menu item types
CASCADE='cascade'
CHECKBUTTON='checkbutton'
COMMAND='command'
RADIOBUTTON='radiobutton'
SEPARATOR='separator'
# Selection modes for list boxes
SINGLE='single'
BROWSE='browse'
MULTIPLE='multiple'
EXTENDED='extended'
# Activestyle for list boxes
# NONE='none' is also valid
DOTBOX='dotbox'
UNDERLINE='underline'
# Various canvas styles
PIESLICE='pieslice'
CHORD='chord'
ARC='arc'
FIRST='first'
LAST='last'
BUTT='butt'
PROJECTING='projecting'
ROUND='round'
BEVEL='bevel'
MITER='miter'
# Arguments to xview/yview
MOVETO='moveto'
SCROLL='scroll'
UNITS='units'
PAGES='pages'
| mit |
RT-Thread/rt-thread | bsp/tm4c129x/rtconfig.py | 12 | 3663 | # BSP Note: For TI EK-TM4C1294XL Tiva C Series Connected LaunchPad (REV D)
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
#device options
PART_TYPE = 'PART_TM4C129XNCZAD'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'D:\ArdaArmTools\Sourcery_Lite\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:\Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = 'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# tool-chains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=softfp -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-tm4c129x.map,-cref,-u,Reset_Handler -T tm4c_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter tm4c_rom.sct --info sizes --info totals --info unused --info veneers --list rtthread-tm4c129x.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB'
EXEC_PATH += '/arm/armcc/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm' # + ' -D' + PART_TYPE
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "tm4c_rom.icf"'
LFLAGS += ' --entry __iar_program_start'
#LFLAGS += ' --silent'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = ''
| apache-2.0 |
nhr/openshift-ansible | roles/lib_openshift/src/lib/project.py | 82 | 2503 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class ProjectConfig(OpenShiftCLIConfig):
''' project config object '''
def __init__(self, rname, namespace, kubeconfig, project_options):
super(ProjectConfig, self).__init__(rname, None, kubeconfig, project_options)
class Project(Yedit):
''' Class to wrap the oc command line tools '''
annotations_path = "metadata.annotations"
kind = 'Project'
annotation_prefix = 'openshift.io/'
def __init__(self, content):
'''Project constructor'''
super(Project, self).__init__(content=content)
def get_annotations(self):
''' return the annotations'''
return self.get(Project.annotations_path) or {}
def add_annotations(self, inc_annos):
''' add an annotation to the other annotations'''
if not isinstance(inc_annos, list):
inc_annos = [inc_annos]
annos = self.get_annotations()
if not annos:
self.put(Project.annotations_path, inc_annos)
else:
for anno in inc_annos:
for key, value in anno.items():
annos[key] = value
return True
def find_annotation(self, key):
''' find an annotation'''
annotations = self.get_annotations()
for anno in annotations:
if Project.annotation_prefix + key == anno:
return annotations[anno]
return None
def delete_annotation(self, inc_anno_keys):
''' remove an annotation from a project'''
if not isinstance(inc_anno_keys, list):
inc_anno_keys = [inc_anno_keys]
annos = self.get(Project.annotations_path) or {}
if not annos:
return True
removed = False
for inc_anno in inc_anno_keys:
anno = self.find_annotation(inc_anno)
if anno:
del annos[Project.annotation_prefix + anno]
removed = True
return removed
def update_annotation(self, key, value):
''' update an annotation for a project'''
annos = self.get(Project.annotations_path) or {}
if not annos:
return True
updated = False
anno = self.find_annotation(key)
if anno:
annos[Project.annotation_prefix + key] = value
updated = True
else:
self.add_annotations({Project.annotation_prefix + key: value})
return updated
| apache-2.0 |
nicobot/electron | tools/mac/apply_locales.py | 202 | 1454 | #!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
import sys
import optparse
def main(argv):
parser = optparse.OptionParser()
usage = 'usage: %s [options ...] format_string locale_list'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-d', dest='dash_to_underscore', action="store_true",
default=False,
help='map "en-US" to "en" and "-" to "_" in locales')
(options, arglist) = parser.parse_args(argv)
if len(arglist) < 3:
print 'ERROR: need string and list of locales'
return 1
str_template = arglist[1]
locales = arglist[2:]
results = []
for locale in locales:
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
if options.dash_to_underscore:
if locale == 'en-US':
locale = 'en'
locale = locale.replace('-', '_')
results.append(str_template.replace('ZZLOCALE', locale))
# Quote each element so filename spaces don't mess up GYP's attempt to parse
# it into a list.
print ' '.join(["'%s'" % x for x in results])
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
klen/peewee_migrate | tests/test_cli.py | 1 | 3157 | from click.testing import CliRunner
import pytest
from peewee_migrate.cli import cli, get_router
runner = CliRunner()
@pytest.fixture
def dir_option(tmpdir):
return '--directory=%s' % tmpdir
@pytest.fixture
def db_url(tmpdir):
db_path = '%s/test_sqlite.db' % tmpdir
open(db_path, 'a').close()
return 'sqlite:///%s' % db_path
@pytest.fixture
def db_option(db_url):
return '--database=%s' % db_url
@pytest.fixture
def router(tmpdir, db_url):
return lambda: get_router(str(tmpdir), db_url)
@pytest.fixture
def migrations(router):
migrations_number = 5
name = 'test'
for i in range(migrations_number):
router().create(name)
return ['00%s_test' % i for i in range(1, migrations_number + 1)]
@pytest.fixture
def migrations_str(migrations):
return ', '.join(migrations)
def test_help():
result = runner.invoke(cli, ['--help'])
assert result.exit_code == 0
assert 'migrate' in result.output
assert 'create' in result.output
assert 'rollback' in result.output
def test_create(dir_option, db_option):
for i in range(2):
result = runner.invoke(cli, ['create', dir_option, db_option, '-vvv', 'test'])
assert result.exit_code == 0
def test_migrate(dir_option, db_option, migrations_str):
result = runner.invoke(cli, ['migrate', dir_option, db_option])
assert result.exit_code == 0
assert 'Migrations completed: %s' % migrations_str in result.output
def test_list(dir_option, db_option, migrations):
result = runner.invoke(cli, ['list', dir_option, db_option])
assert 'Migrations are done:\n' in result.output
assert 'Migrations are undone:\n%s' % '\n'.join(migrations) in result.output
def test_rollback(dir_option, db_option, router, migrations):
router().run()
count_overflow = len(migrations) + 1
result = runner.invoke(cli, ['rollback', dir_option, db_option, '--count=%s' % count_overflow])
assert result.exception
assert 'Unable to rollback %s migrations' % count_overflow in result.exception.args[0]
assert router().done == migrations
result = runner.invoke(cli, ['rollback', dir_option, db_option])
assert not result.exception
assert router().done == migrations[:-1]
result = runner.invoke(cli, ['rollback', dir_option, db_option, '004_test'])
assert not result.exception
assert router().done == migrations[:-2]
result = runner.invoke(cli, ['rollback', dir_option, db_option, '--count=2'])
assert not result.exception
assert router().done == migrations[:-4]
result = runner.invoke(cli, ['rollback', dir_option, db_option, '005_test'])
assert result.exception
assert result.exception.args[0] == 'Only last migration can be canceled.'
assert router().done == migrations[:-4]
def test_fake(dir_option, db_option, migrations_str, router):
result = runner.invoke(cli, ['migrate', dir_option, db_option, '-v', '--fake'])
assert result.exit_code == 0
assert 'Migrations completed: %s' % migrations_str in result.output
# TODO: Find a way of testing fake. It is unclear why the following fails.
# assert not router().done
| bsd-3-clause |
akretion/bank-statement-reconcile-simple | account_statement_completion_label_simple/wizard/account_statement_label_create.py | 1 | 1772 | # Copyright 2018-2019 Akretion France (http://www.akretion.com/)
# @author: Alexis de Lattre <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
class AccountStatementLabelCreate(models.TransientModel):
_name = 'account.statement.label.create'
_description = 'Account Statement Label Create Wizard'
@api.model
def default_get(self, fields_list):
res = super(AccountStatementLabelCreate, self).default_get(
fields_list)
assert self._context.get('active_model') ==\
'account.bank.statement.line', 'Wrong active model'
assert self._context.get('active_id'), 'missing active_id in context'
line = self.env['account.bank.statement.line'].browse(
self._context['active_id'])
res.update({
'new_label': line.name,
'statement_line_id': line.id,
})
return res
statement_line_id = fields.Many2one(
'account.bank.statement.line', string='Bank Statement Line',
readonly=True)
current_label = fields.Char(
related='statement_line_id.name', readonly=True,
string='Statement Line Label')
new_label = fields.Char(string="New Label", required=True)
partner_id = fields.Many2one(
'res.partner', string='Partner', domain=[('parent_id', '=', False)],
required=True)
def run(self):
self.ensure_one()
self.env['account.statement.label'].create({
'partner_id': self.partner_id.id,
'label': self.new_label,
'company_id': self.statement_line_id.company_id.id,
})
self.statement_line_id.statement_id.update_statement_lines()
return True
| agpl-3.0 |
jumpserver/jumpserver | apps/ops/inventory.py | 2 | 5220 | # -*- coding: utf-8 -*-
#
from django.conf import settings
from .ansible.inventory import BaseInventory
from common.utils import get_logger
__all__ = [
'JMSInventory', 'JMSCustomInventory',
]
logger = get_logger(__file__)
class JMSBaseInventory(BaseInventory):
windows_ssh_default_shell = settings.WINDOWS_SSH_DEFAULT_SHELL
def convert_to_ansible(self, asset, run_as_admin=False):
info = {
'id': asset.id,
'hostname': asset.hostname,
'ip': asset.ip,
'port': asset.ssh_port,
'vars': dict(),
'groups': [],
}
if asset.domain and asset.domain.has_gateway():
info["vars"].update(self.make_proxy_command(asset))
if run_as_admin:
info.update(asset.get_auth_info())
if asset.is_unixlike():
info["become"] = asset.admin_user.become_info
if asset.is_windows():
info["vars"].update({
"ansible_connection": "ssh",
"ansible_shell_type": self.windows_ssh_default_shell,
})
for label in asset.labels.all():
info["vars"].update({
label.name: label.value
})
if asset.domain:
info["vars"].update({
"domain": asset.domain.name,
})
return info
@staticmethod
def make_proxy_command(asset):
gateway = asset.domain.random_gateway()
proxy_command_list = [
"ssh", "-o", "Port={}".format(gateway.port),
"-o", "StrictHostKeyChecking=no",
"{}@{}".format(gateway.username, gateway.ip),
"-W", "%h:%p", "-q",
]
if gateway.password:
proxy_command_list.insert(
0, "sshpass -p '{}'".format(gateway.password)
)
if gateway.private_key:
proxy_command_list.append("-i {}".format(gateway.private_key_file))
proxy_command = "'-o ProxyCommand={}'".format(
" ".join(proxy_command_list)
)
return {"ansible_ssh_common_args": proxy_command}
class JMSInventory(JMSBaseInventory):
"""
JMS Inventory is the inventory built from jumpserver assets, so you can
write your own inventory and construct it as needed;
user_info is obtained from admin_user or asset_user
"""
def __init__(self, assets, run_as_admin=False, run_as=None, become_info=None, system_user=None):
"""
:param assets: assets
:param run_as_admin: if True, run as the admin user; each server may have a different admin user
:param run_as: username (to be changed to username after the unified asset user manager AssetUserManager was added)
:param become_info: whether to become a given user for execution
"""
self.assets = assets
self.using_admin = run_as_admin
self.run_as = run_as
self.system_user = system_user
self.become_info = become_info
host_list = []
for asset in assets:
host = self.convert_to_ansible(asset, run_as_admin=run_as_admin)
if run_as is not None:
run_user_info = self.get_run_user_info(host)
host.update(run_user_info)
if become_info and asset.is_unixlike():
host.update(become_info)
host_list.append(host)
super().__init__(host_list=host_list)
def get_run_user_info(self, host):
from assets.backends import AssetUserManager
if not self.run_as and not self.system_user:
return {}
asset_id = host.get('id', '')
asset = self.assets.filter(id=asset_id).first()
if not asset:
logger.error('Host not found: ', asset_id)
if self.system_user:
self.system_user.load_asset_special_auth(asset=asset, username=self.run_as)
return self.system_user._to_secret_json()
try:
manager = AssetUserManager()
run_user = manager.get_latest(username=self.run_as, asset=asset, prefer='system_user')
return run_user._to_secret_json()
except Exception as e:
logger.error(e, exc_info=True)
return {}
class JMSCustomInventory(JMSBaseInventory):
"""
JMS Custom Inventory is the inventory built from jumpserver assets;
user_info is obtained from custom parameters
"""
def __init__(self, assets, username, password=None, public_key=None, private_key=None):
"""
"""
self.assets = assets
self.username = username
self.password = password
self.public_key = public_key
self.private_key = private_key
host_list = []
for asset in assets:
host = self.convert_to_ansible(asset)
run_user_info = self.get_run_user_info()
host.update(run_user_info)
host_list.append(host)
super().__init__(host_list=host_list)
def get_run_user_info(self):
return {
'username': self.username,
'password': self.password,
'public_key': self.public_key,
'private_key': self.private_key
}
| gpl-2.0 |
Logan213/is210-week-05-warmup | tests/test_task_04.py | 28 | 1434 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests Task 04."""
# Import Python libs
import unittest
import task_04
class Task04TestCase(unittest.TestCase):
"""Test cases for Task 04."""
testmap = {
'not_enough_litterboxes': [2, 1, True, True],
'plenty_of_litterboxes': [1, 2, True, False],
'no_catfood': [1, 2, False, True],
'same_litterboxes': [1, 1, True, False],
}
def test_positional_args(self):
"""
Tests that too_many_kittens has the correct positional arguments.
"""
for case, params in self.testmap.iteritems():
result = task_04.too_many_kittens(*params[:3])
msg = 'Tried {} kittens, {} litterboxes and {} food, expected {}'
msg = msg.format(*params)
self.assertIs(result, params[3], msg)
def test_keyword_args(self):
"""
Tests that too_many_kittens has the correct keyword arguments.
"""
for case, params in self.testmap.iteritems():
result = task_04.too_many_kittens(kittens=params[0],
litterboxes=params[1],
catfood=params[2])
msg = 'Tried {} kittens, {} litterboxes, and {} food, expected {}'
msg = msg.format(*params)
self.assertIs(result, params[3], msg)
if __name__ == '__main__':
unittest.main()
| mpl-2.0 |
tudorvio/nova | nova/cmd/dhcpbridge.py | 41 | 4778 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handle lease database updates from DHCP servers.
"""
from __future__ import print_function
import os
import sys
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from nova.conductor import rpcapi as conductor_rpcapi
from nova import config
from nova import context
import nova.db.api
from nova import exception
from nova.i18n import _LE
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as objects_base
from nova import rpc
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
LOG = logging.getLogger(__name__)
def add_lease(mac, ip_address):
"""Set the IP that was assigned by the DHCP server."""
api = network_rpcapi.NetworkAPI()
api.lease_fixed_ip(context.get_admin_context(), ip_address, CONF.host)
def old_lease(mac, ip_address):
"""Called when an old lease is recognized."""
# NOTE(vish): We assume we heard about this lease the first time.
# If not, we will get it the next time the lease is
# renewed.
pass
def del_lease(mac, ip_address):
"""Called when a lease expires."""
api = network_rpcapi.NetworkAPI()
api.release_fixed_ip(context.get_admin_context(), ip_address,
CONF.host, mac)
def init_leases(network_id):
"""Get the list of hosts for a network."""
ctxt = context.get_admin_context()
network = objects.Network.get_by_id(ctxt, network_id)
network_manager = importutils.import_object(CONF.network_manager)
return network_manager.get_dhcp_leases(ctxt, network)
def add_action_parsers(subparsers):
subparsers.add_parser('init')
# NOTE(cfb): dnsmasq always passes mac, and ip. hostname
# is passed if known. We don't care about
# hostname, but argparse will complain if we
# do not accept it.
for action in ['add', 'del', 'old']:
parser = subparsers.add_parser(action)
parser.add_argument('mac')
parser.add_argument('ip')
parser.add_argument('hostname', nargs='?', default='')
parser.set_defaults(func=globals()[action + '_lease'])
CONF.register_cli_opt(
cfg.SubCommandOpt('action',
title='Action options',
help='Available dhcpbridge options',
handler=add_action_parsers))
def block_db_access():
class NoDB(object):
def __getattr__(self, attr):
return self
def __call__(self, *args, **kwargs):
stacktrace = "".join(traceback.format_stack())
LOG.error(_LE('No db access allowed in nova-dhcpbridge: %s'),
stacktrace)
raise exception.DBNotAllowed('nova-dhcpbridge')
nova.db.api.IMPL = NoDB()
def main():
"""Parse environment and arguments and call the appropriate action."""
config.parse_args(sys.argv,
default_config_files=jsonutils.loads(os.environ['CONFIG_FILE']))
logging.setup(CONF, "nova")
global LOG
LOG = logging.getLogger('nova.dhcpbridge')
objects.register_all()
if not CONF.conductor.use_local:
block_db_access()
objects_base.NovaObject.indirection_api = \
conductor_rpcapi.ConductorAPI()
if CONF.action.name in ['add', 'del', 'old']:
LOG.debug("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'",
{"action": CONF.action.name,
"mac": CONF.action.mac,
"ip": CONF.action.ip})
CONF.action.func(CONF.action.mac, CONF.action.ip)
else:
try:
network_id = int(os.environ.get('NETWORK_ID'))
except TypeError:
LOG.error(_LE("Environment variable 'NETWORK_ID' must be set."))
return(1)
print(init_leases(network_id))
rpc.cleanup()
| apache-2.0 |
ragnarstroberg/ragnar_imsrg | src/pybind11/tests/test_modules.py | 4 | 2208 |
def test_nested_modules():
import pybind11_tests
from pybind11_tests.submodule import submodule_func
assert pybind11_tests.__name__ == "pybind11_tests"
assert pybind11_tests.submodule.__name__ == "pybind11_tests.submodule"
assert submodule_func() == "submodule_func()"
def test_reference_internal():
from pybind11_tests import ConstructorStats
from pybind11_tests.submodule import A, B
b = B()
assert str(b.get_a1()) == "A[1]"
assert str(b.a1) == "A[1]"
assert str(b.get_a2()) == "A[2]"
assert str(b.a2) == "A[2]"
b.a1 = A(42)
b.a2 = A(43)
assert str(b.get_a1()) == "A[42]"
assert str(b.a1) == "A[42]"
assert str(b.get_a2()) == "A[43]"
assert str(b.a2) == "A[43]"
astats, bstats = ConstructorStats.get(A), ConstructorStats.get(B)
assert astats.alive() == 2
assert bstats.alive() == 1
del b
assert astats.alive() == 0
assert bstats.alive() == 0
assert astats.values() == ['1', '2', '42', '43']
assert bstats.values() == []
assert astats.default_constructions == 0
assert bstats.default_constructions == 1
assert astats.copy_constructions == 0
assert bstats.copy_constructions == 0
# assert astats.move_constructions >= 0 # Don't invoke any
# assert bstats.move_constructions >= 0 # Don't invoke any
assert astats.copy_assignments == 2
assert bstats.copy_assignments == 0
assert astats.move_assignments == 0
assert bstats.move_assignments == 0
def test_importing():
from pybind11_tests import OD
from collections import OrderedDict
assert OD is OrderedDict
assert str(OD([(1, 'a'), (2, 'b')])) == "OrderedDict([(1, 'a'), (2, 'b')])"
def test_pydoc():
"""Pydoc needs to be able to provide help() for everything inside a pybind11 module"""
import pybind11_tests
import pydoc
assert pybind11_tests.__name__ == "pybind11_tests"
assert pybind11_tests.__doc__ == "pybind11 test module"
assert pydoc.text.docmodule(pybind11_tests)
def test_duplicate_registration():
"""Registering two things with the same name"""
from pybind11_tests import duplicate_registration
assert duplicate_registration() == []
| gpl-2.0 |
brunobergher/dotfiles | sublime/pygments/all/pygments/lexers/textedit.py | 47 | 6057 | # -*- coding: utf-8 -*-
"""
pygments.lexers.textedit
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for languages related to text processing.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from bisect import bisect
from pygments.lexer import RegexLexer, include, default, bygroups, using, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.lexers.python import PythonLexer
__all__ = ['AwkLexer', 'VimLexer']
class AwkLexer(RegexLexer):
"""
For Awk scripts.
.. versionadded:: 1.5
"""
name = 'Awk'
aliases = ['awk', 'gawk', 'mawk', 'nawk']
filenames = ['*.awk']
mimetypes = ['application/x-awk']
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'#.*$', Comment.Single)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'\B', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|\|\||&&|in\b|\$|!?~|'
r'(\*\*|[-<>+*%\^/!=|])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(break|continue|do|while|exit|for|if|else|'
r'return)\b', Keyword, 'slashstartsregex'),
(r'function\b', Keyword.Declaration, 'slashstartsregex'),
(r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|'
r'length|match|split|sprintf|sub|substr|tolower|toupper|close|'
r'fflush|getline|next|nextfile|print|printf|strftime|systime|'
r'delete|system)\b', Keyword.Reserved),
(r'(ARGC|ARGIND|ARGV|BEGIN|CONVFMT|ENVIRON|END|ERRNO|FIELDWIDTHS|'
r'FILENAME|FNR|FS|IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|'
r'RSTART|RT|SUBSEP)\b', Name.Builtin),
(r'[$a-zA-Z_]\w*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class VimLexer(RegexLexer):
"""
Lexer for VimL script files.
.. versionadded:: 0.8
"""
name = 'VimL'
aliases = ['vim']
filenames = ['*.vim', '.vimrc', '.exrc', '.gvimrc',
'_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc']
mimetypes = ['text/x-vim']
flags = re.MULTILINE
_python = r'py(?:t(?:h(?:o(?:n)?)?)?)?'
tokens = {
'root': [
(r'^([ \t:]*)(' + _python + r')([ \t]*)(<<)([ \t]*)(.*)((?:\n|.)*)(\6)',
bygroups(using(this), Keyword, Text, Operator, Text, Text,
using(PythonLexer), Text)),
(r'^([ \t:]*)(' + _python + r')([ \t])(.*)',
bygroups(using(this), Keyword, Text, using(PythonLexer))),
(r'^\s*".*', Comment),
(r'[ \t]+', Text),
# TODO: regexes can have other delims
(r'/(\\\\|\\/|[^\n/])*/', String.Regex),
(r'"(\\\\|\\"|[^\n"])*"', String.Double),
(r"'(''|[^\n'])*'", String.Single),
# Who decided that doublequote was a good comment character??
(r'(?<=\s)"[^\-:.%#=*].*', Comment),
(r'-?\d+', Number),
(r'#[0-9a-f]{6}', Number.Hex),
(r'^:', Punctuation),
(r'[()<>+=!|,~-]', Punctuation), # Inexact list. Looks decent.
(r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
Keyword),
(r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
(r'\b\w+\b', Name.Other), # These are postprocessed below
(r'.', Text),
],
}
def __init__(self, **options):
from pygments.lexers._vim_builtins import command, option, auto
self._cmd = command
self._opt = option
self._aut = auto
RegexLexer.__init__(self, **options)
def is_in(self, w, mapping):
r"""
It's kind of difficult to decide if something might be a keyword
in VimL because it allows you to abbreviate them. In fact,
'ab[breviate]' is a good example. :ab, :abbre, or :abbreviate are
valid ways to call it so rather than making really awful regexps
like::
\bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
we match `\b\w+\b` and then call is_in() on those tokens. See
`scripts/get_vimkw.py` for how the lists are extracted.
"""
p = bisect(mapping, (w,))
if p > 0:
if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
mapping[p-1][1][:len(w)] == w:
return True
if p < len(mapping):
return mapping[p][0] == w[:len(mapping[p][0])] and \
mapping[p][1][:len(w)] == w
return False
def get_tokens_unprocessed(self, text):
# TODO: builtins are only subsequent tokens on lines
# and 'keywords' only happen at the beginning except
# for :au ones
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name.Other:
if self.is_in(value, self._cmd):
yield index, Keyword, value
elif self.is_in(value, self._opt) or \
self.is_in(value, self._aut):
yield index, Name.Builtin, value
else:
yield index, Text, value
else:
yield index, token, value
| mit |
tliber/scrapy | setup.py | 83 | 1605 | from os.path import dirname, join
from setuptools import setup, find_packages
with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
setup(
name='Scrapy',
version=version,
url='http://scrapy.org',
description='A high-level Web Crawling and Web Scraping framework',
long_description=open('README.rst').read(),
author='Scrapy developers',
maintainer='Pablo Hoffman',
maintainer_email='[email protected]',
license='BSD',
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': ['scrapy = scrapy.cmdline:execute']
},
classifiers=[
'Framework :: Scrapy',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'Twisted>=10.0.0',
'w3lib>=1.8.0',
'queuelib',
'lxml',
'pyOpenSSL',
'cssselect>=0.9',
'six>=1.5.2',
'parsel>=0.9.3',
'PyDispatcher>=2.0.5',
'service_identity',
],
)
| bsd-3-clause |
oVirt/ovirt-hosted-engine-setup | src/plugins/gr-he-common/sanlock/lockspace.py | 1 | 2295 | #
# ovirt-hosted-engine-setup -- ovirt hosted engine setup
# Copyright (C) 2013-2017 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
sanlock lockspace initialization plugin.
"""
import gettext
from otopi import plugin
from otopi import util
from ovirt_hosted_engine_setup import constants as ohostedcons
def _(m):
return gettext.dgettext(message=m, domain='ovirt-hosted-engine-setup')
@util.export
class Plugin(plugin.PluginBase):
"""
sanlock lockspace initialization plugin.
"""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
@plugin.event(
stage=plugin.Stages.STAGE_INIT
)
def _init(self):
# TODO: check what's still in use and remove everything else from here
self.environment.setdefault(
ohostedcons.SanlockEnv.SANLOCK_SERVICE,
ohostedcons.Defaults.DEFAULT_SANLOCK_SERVICE
)
self.environment.setdefault(
ohostedcons.SanlockEnv.LOCKSPACE_NAME,
ohostedcons.Defaults.DEFAULT_LOCKSPACE_NAME
)
self.environment.setdefault(
ohostedcons.StorageEnv.METADATA_VOLUME_UUID,
None
)
self.environment.setdefault(
ohostedcons.StorageEnv.METADATA_IMAGE_UUID,
None
)
self.environment.setdefault(
ohostedcons.StorageEnv.LOCKSPACE_VOLUME_UUID,
None
)
self.environment.setdefault(
ohostedcons.StorageEnv.LOCKSPACE_IMAGE_UUID,
None
)
# vim: expandtab tabstop=4 shiftwidth=4
| lgpl-2.1 |
emd/boutanalysis | varyped.py | 1 | 4568 | '''Tools for interacting with VARYPED model equilibria'''
import numpy as np
from sys import argv
import string
import copy
def create_db(file_path):
'''Create a dictionary from a VARYPED results text file.
Parameters:
file_path -- string, path to the text file containing VARYPED results.
The first line of the file should begin with "VARYPED RESULTS...",
the second line should give the column titles, e.g. i, pres, cur,
and the remaining lines should give the model equilibria values.
Returns:
a dictionary that will be keyed by the VARYPED model equlibria index
with values that are themselves dictionaries of the parameters of
the corresponding model equilibria.
'''
f = open(file_path)
# v will be keyed by the model equlibria index, and its values will be
# dictionaries of the parameters of the corresponding model equilibria
v = dict()
# Create a temporary dictionary
d = dict()
# Read the lines from f into the dictionary v
for linenum, line in enumerate(f):
if linenum == 1:
# The titles of each column will be used as dictionary keys
keys = line.split()
elif linenum > 1:
for i, val in enumerate(line.split()):
if keys[i] == 'i':
d[keys[i]] = int(val)
else:
d[keys[i]] = float(val)
v[d['i']] = copy.copy(d)
f.close()
return v
def find_eq(v, p = None, J = None):
'''Find VARYPED model equilibria indices with the specified parameters.
Parameters:
v -- dictionary, the keys will be the VARYPED model equilibria
indices, and the value pairs will themselves be dictionaries
of the parameters of the corresponding equilibria
p -- scalar, specifying the fraction of the experimental pressure
gradient. To find the VARYPED model equilibria indices with
75% of the experimental pressure gradient, specify p = 0.75, etc
J -- scalar, specifying the fraction of the experimental edge current
density. To find the VARYPED model equilibria indices with
110% of the experimental edge current density, specify J = 1.10
Returns:
The indices of the VARYPED equilibria with the specified pressure
gradients and edge current densities.
'''
if p is None and J is None:
print 'No parameters specified. Returning NoneType object.'
return None
# Sort through the dictionary v for model equilibria that have the
# specified parameters and add their index to the list ind.
# NOTE: We also only count equilibria that converged (convrg = 1),
# as unconverged equilibria are useless to us.
ind = list()
for eq in v.keys():
if p is None:
if v[eq]['cur'] == J and v[eq]['convrg'] == 1:
ind.append(eq)
elif J is None:
if v[eq]['pres'] == p and v[eq]['convrg'] == 1:
ind.append(eq)
elif v[eq]['pres'] == p and v[eq]['cur'] == J and v[eq]['convrg'] == 1:
ind.append(eq)
return ind
def get_params(v, ind):
'''Get model parameters for specified VARYPED model equilibria.
Parameters:
v -- dictionary, the keys will be the VARYPED model equilibria
indices, and the value pairs will themselves be dictionaries
of the parameters of the corresponding equilibria
ind -- list, the index of equilibria to obtain parameters for
Returns:
params -- list, with each line giving an ordered pair (p, J), where
p is the fraction of the experimental pressure gradient and
J is the fraction of the experimental edge current density.
'''
params = list()
for i in ind:
params.append((v[i]['pres'], v[i]['cur']))
return params
if __name__ == '__main__':
file_path, p, J = argv[1:4]
if p == 'None':
p = None
else:
p = float(p)
if J == 'None':
J = None
else:
J = float(J)
v = create_db(file_path)
ind = find_eq(v, p, J)
params = get_params(v, ind)
if ind is not None:
column = '{:<6}'
print '\nVARYPED Equilibria:'
print column.format('i') + column.format('p') + column.format('J')
print '-'*15
for i in range(len(ind)):
out = (column.format(str(ind[i]))
+ column.format(str(params[i][0]))
+ column.format(str(params[i][1])))
print out
print ''
| lgpl-3.0 |
SM-G920P/G92XP-R4_COI9 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/tseries/tests/test_base.py | 9 | 82416 | from __future__ import print_function
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assertIsInstance
from pandas.tseries.common import is_datetimelike
from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
import pandas.tseries.offsets as offsets
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year','day','second','weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
# attribute access should still work!
s = Series(dict(year=2000,month=1,day=10))
self.assertEqual(s.year,2000)
self.assertEqual(s.month,1)
self.assertEqual(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4, name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03','2012-01-04'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name', tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H', name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00', '2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
        # test astype string with freq='H' and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00', '2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
pd.NaT, pd.Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00+09:00', '2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='H')""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', 'NaT'], dtype='datetime64[ns, US/Eastern]', freq=None)""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00+00:00', '2011-01-01 10:00:00+00:00', 'NaT'], dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 to 2011-01-01 11:00:00+09:00
Freq: H"""
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng - delta
expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz)
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz)
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz='Asia/Tokyo', name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz='Asia/Tokyo', name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_take(self):
#GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', '-3D',
'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ ]
def test_ops_properties(self):
self.check_ops_properties(['days','hours','minutes','seconds','milliseconds'])
self.check_ops_properties(['microseconds','nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),Timedelta('3 days'),
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1),timedelta(days=2),pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = """TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')"""
exp3 = """TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')"""
exp4 = """TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq='D')"""
exp5 = """TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', '3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)"""
with pd.option_context('display.width',300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width',300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = """TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00','10 days 02:00:00',freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
rng = timedelta_range('1 days','10 days',name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda : rng * offset)
# divide
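        # dividing 1..10 days by a 2 hour offset gives 12, 24, ..., 120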
expected = Int64Index((np.arange(10)+1)*12,name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda : rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda : tdi - dt)
self.assertRaises(TypeError, lambda : tdi - dti)
self.assertRaises(TypeError, lambda : td - dt)
self.assertRaises(TypeError, lambda : td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101',periods=3)
ts = Timestamp('20130101')
dt = ts.to_datetime()
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_datetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result,expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda : dt_tz - ts)
self.assertRaises(TypeError, lambda : dt_tz - dt)
self.assertRaises(TypeError, lambda : dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda : dt - dt_tz)
self.assertRaises(TypeError, lambda : ts - dt_tz)
self.assertRaises(TypeError, lambda : ts_tz2 - ts)
self.assertRaises(TypeError, lambda : ts_tz2 - dt)
self.assertRaises(TypeError, lambda : ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda : dti - ts_tz)
self.assertRaises(TypeError, lambda : dti_tz - ts)
self.assertRaises(TypeError, lambda : dti_tz - ts_tz2)
result = dti_tz-dt_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = dt_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = dti_tz-ts_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = ts_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(['20121231','20130101','20130102'],tz='US/Eastern')
tm.assert_index_equal(result,expected)
def test_dti_dti_deprecated_ops(self):
# deprecated in 0.16.0 (GH9094)
# change to return subtraction -> TimeDeltaIndex in 0.17.0
        # should move to the appropriate sections above
dti = date_range('20130101',periods=3)
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
with tm.assert_produces_warning(FutureWarning):
result = dti-dti
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti+dti
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti_tz
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz+dti_tz
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti-dti_tz
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti_tz+dti)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti+dti_tz)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda : tdi + dti[0:1])
self.assertRaises(ValueError, lambda : tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda : tdi + Int64Index([1,2,3]))
# this is a union!
#self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00',
'1 days 08:00:00', '1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
#GH 9680
tdi = pd.timedelta_range(start=0,periods=10,freq='1s')
ts = pd.Series(np.random.normal(size=10),index=tdi)
self.assertNotIn('foo',ts.__dict__.keys())
self.assertRaises(AttributeError,lambda : ts.foo)
def test_order(self):
#GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D', name='idx')
idx2 = TimedeltaIndex(['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
'3 day', '5 day'], name='idx2')
idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
'2 minute', pd.NaT], name='idx3')
exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
'5 minute'], name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_take(self):
#GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
for i in [0, 1, 3]:
self.assertTrue(result[i], expected[i])
self.assertTrue(result[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result[2].freq, 'D')
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertTrue(result_list[i], expected_list[i])
self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result_list[2].freq, 'D')
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex([], dtype='int64', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='int64', freq='D')"""
exp3 = """PeriodIndex(['2011-01-01', '2011-01-02'], dtype='int64', freq='D')"""
exp4 = """PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='int64', freq='D')"""
exp5 = """PeriodIndex(['2011', '2012', '2013'], dtype='int64', freq='A-DEC')"""
exp6 = """PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], dtype='int64', freq='H')"""
exp7 = """PeriodIndex(['2013Q1'], dtype='int64', freq='Q-DEC')"""
exp8 = """PeriodIndex(['2013Q1', '2013Q2'], dtype='int64', freq='Q-DEC')"""
exp9 = """PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='int64', freq='Q-DEC')"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH 6527
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
            with tm.assertRaisesRegexp(ValueError, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3),
np.timedelta64(72, 'h'), Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120),
np.timedelta64(120, 'm'), Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(ValueError, msg):
result = rng + delta
with tm.assertRaisesRegexp(ValueError, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
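        # rng4 and other4 do not overlap, so the difference leaves rng4 unchanged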
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7),]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(ValueError, msg):
                result = rng - delta
            with tm.assertRaisesRegexp(ValueError, msg):
                rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00',
'2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H')
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012', '2011'], name='pidx', freq='A')
pexpected = PeriodIndex(['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx', freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertEqual(ordered.freq, 'D')
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx[-1]
self.assertEqual(result, pd.Period('2011-01-31', freq='D'))
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15', '2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
def test_take(self):
#GH 10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx.take([5])
self.assertEqual(result, pd.Period('2011-01-06', freq='D'))
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05', '2011-01-02'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| gpl-2.0 |
maistrovas/My-Courses-Solutions | Coursera Algorithmic Thinking (Part 1)/Module 2/Application/Application2.py | 1 | 7494 | """
Provided code for Application portion of Module 2
Answers 4/6
Application Grade is 13 out of 15
Text Answers
-Question 2:
All three graphs are resilient in this case.
-Question 5:
UPA and ER graphs are still resilient in this type of attack
(UPA comes very close to exceeding the 25% roughness threshold).
"""
# general imports
import urllib2
import random
import timeit
import time
import math
import UPA
from collections import deque
from random import shuffle
import BFS_project as project
import matplotlib.pyplot as plt
import numpy as np
# CodeSkulptor import
#import simpleplot
#import codeskulptor
#codeskulptor.set_timeout(60)
# Desktop imports
#import matplotlib.pyplot as plt
############################################
# Provided code
def copy_graph(graph):
"""
Make a copy of a graph
"""
new_graph = {}
for node in graph:
new_graph[node] = set(graph[node])
return new_graph
def delete_node(ugraph, node):
"""
Delete a node from an undirected graph
"""
neighbors = ugraph[node]
ugraph.pop(node)
for neighbor in neighbors:
ugraph[neighbor].remove(node)
def targeted_order(ugraph):
"""
Compute a targeted attack order consisting
of nodes of maximal degree
Returns:
A list of nodes
"""
# copy the graph
new_graph = copy_graph(ugraph)
order = []
while len(new_graph) > 0:
max_degree = -1
for node in new_graph:
if len(new_graph[node]) > max_degree:
max_degree = len(new_graph[node])
max_degree_node = node
neighbors = new_graph[max_degree_node]
new_graph.pop(max_degree_node)
for neighbor in neighbors:
new_graph[neighbor].remove(max_degree_node)
order.append(max_degree_node)
return order
##########################################################
# Code for loading computer network graph
NETWORK_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_rf7.txt"
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
counter = 0
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
counter +=1
answer_graph[node].add(int(neighbor))
print 'Number network edges = ', counter / 2
return answer_graph
def er_graph(n, p):
'''
implementation of ER algorithm
n - final number of nodes
p - probability
'''
graph = {key: set() for key in xrange(n)}
counter = 0
for i in xrange(n):
for j in xrange(n):
if i == j:
continue
if random.random() < p:
counter += 1
graph[i].add(j)
graph[j].add(i)
print 'Number of ER-edges=', counter
return graph
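# Note on er_graph above: the double loop visits every ordered pair (i, j), so
# each undirected pair receives two independent trials and the effective edge
# probability is 1 - (1 - p)**2 (slightly below 2*p); the printed edge counter
# may also count the same edge twice. A single-trial variant (illustrative
# sketch only, not part of the submitted solution) would be:
#
#     for i in xrange(n):
#         for j in xrange(i):
#             if random.random() < p:
#                 graph[i].add(j)
#                 graph[j].add(i)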
##UPA-Algorithm
def algorithm_upa(n, m):
'''
implementation of UPA algorithm
n - final number of nodes
m - number of existing nodes
p - probability for er_graph
'''
graph = er_graph(m, 1)
upa = UPA.UPATrial(m)
counter = 0
for i in xrange(m, n):
new_edges = upa.run_trial(m)
graph[i] = new_edges
for node in new_edges:
graph[node].add(i)
return graph
def random_order(graph):
'''
takes a graph and returns a list
of the nodes in the graph in some random order
'''
result = deque()
for node in graph:
result.append(node)
shuffle(result)
return result
loaded_graph = load_graph(NETWORK_URL)
er_ggraph = er_graph(1239, 0.004)
upa_graph = algorithm_upa(1239, 3)
def count_Uedges(ugraph):
'''
count edges in the graph
'''
counter = 0
for i in ugraph:
for j in ugraph[i]:
counter +=1
return counter/2
# print 'UPA edges = ', count_Uedges(upa_graph)
# print 'ER edges =', count_Uedges(er_ggraph)
# print 'Network graph edges =', count_Uedges(loaded_graph)
def plotting(net_g, er_g, upa_g, question):
"""
Plot an example with two curves with legends
x - number of nodes removed
y - size of the largest connect component
in the graphs resulting from the node removal.
"""
if question == 1:
print 'The function plots question 1'
network_order = random_order(net_g)
er_order = random_order(er_g)
upa_order = random_order(upa_g)
if question == 4:
print 'The function plots question 4'
network_order = targeted_order(net_g)
er_order = targeted_order(er_g)
upa_order = targeted_order(upa_g)
network_resil = project.compute_resilience(net_g, network_order)
er_resil = project.compute_resilience(er_g, er_order)
upa_resil = project.compute_resilience(upa_g, upa_order)
xvals_net = np.array([node for node in range(len(network_order) +1 )])
xvals_er = np.array([node for node in range(len(er_order) +1 )])
xvals_upa = np.array([node for node in range(len(upa_order) +1 )])
yvals_net = np.array(network_resil)
yvals_er = np.array(er_resil)
yvals_upa = np.array(upa_resil)
plt.figure('Application2 Plot')
plt.title('Resilience comparison')
plt.xlabel('Removed nodes')
plt.ylabel('Largest conected component')
plt.plot(xvals_net, yvals_net, '-b', label='Network-Data')
plt.plot(xvals_er, yvals_er, '-r', label='ER-Algorithm (p = 0.004)')
plt.plot(xvals_upa, yvals_upa, '-g', label='UPA-Algorithm (m = 3)')
plt.legend(loc='upper right')
plt.show()
'''
Questions 1,4
'''
plotting(loaded_graph, er_ggraph, upa_graph, 1)
#plotting(loaded_graph, er_ggraph, upa_graph, 4)
def measure_targeted_order(n, m, func):
graph = algorithm_upa(n, m)
return timeit.timeit(lambda: func(graph), number=1)
def fast_targeted_order(ugraph):
    '''
    Compute a targeted attack order in O(n + m) time by bucketing nodes
    into sets indexed by their current degree, instead of rescanning the
    whole graph for the maximum-degree node on every removal.
    '''
    ugraph = copy_graph(ugraph)
    N = len(ugraph)
    # Each degree needs its own set; '[set()] * N' would alias a single
    # shared set and break the bucketing.
    degree_sets = [set() for _ in range(N)]
for node, neighbors in ugraph.iteritems():
degree = len(neighbors)
degree_sets[degree].add(node)
order = []
for k in range(N - 1, -1, -1):
while degree_sets[k]:
u = degree_sets[k].pop()
for neighbor in ugraph[u]:
d = len(ugraph[neighbor])
degree_sets[d].remove(neighbor)
degree_sets[d - 1].add(neighbor)
order.append(u)
delete_node(ugraph, u)
return order
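# fast_targeted_order runs in O(n + m), versus roughly O(n**2) for the
# targeted_order function above. Illustrative sanity check (assumed example,
# not part of the original submission): on a star graph the hub has maximal
# degree, so it must be attacked first.
def _example_fast_targeted_order():
    star = {0: set([1, 2, 3]), 1: set([0]), 2: set([0]), 3: set([0])}
    order = fast_targeted_order(star)
    assert order[0] == 0  # the hub is removed first
    return order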
def question3():
'''
Function plotting Question 3
'''
xs = range(10, 1000, 10)
m = 5
    ys_targeted = [measure_targeted_order(n, m, targeted_order) for n in xs]
    ys_fast_targeted = [measure_targeted_order(n, m, fast_targeted_order) for n in xs]
    plt.plot(xs, ys_targeted, '-r', label='targeted_order')
plt.plot(xs, ys_fast_targeted, '-b', label='fast_targeted_order')
plt.title('Targeted order functions performance (desktop Python)')
plt.xlabel('Number of nodes in the graph')
plt.ylabel('Execution time')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
'''
Question 3
Includes only plotting
'''
question3()
| mit |
blink1073/scikit-image | skimage/color/tests/test_adapt_rgb.py | 19 | 2591 | from functools import partial
import numpy as np
from skimage import img_as_float, img_as_uint
from skimage import color, data, filters
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage._shared._warnings import expected_warnings
# Down-sample image for quicker testing.
COLOR_IMAGE = data.astronaut()[::5, ::5]
GRAY_IMAGE = data.camera()[::5, ::5]
SIGMA = 3
smooth = partial(filters.gaussian, sigma=SIGMA)
assert_allclose = partial(np.testing.assert_allclose, atol=1e-8)
@adapt_rgb(each_channel)
def edges_each(image):
return filters.sobel(image)
@adapt_rgb(each_channel)
def smooth_each(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv_uint(image):
with expected_warnings(['precision loss']):
return img_as_uint(filters.sobel(image))
def test_gray_scale_image():
# We don't need to test both `hsv_value` and `each_channel` since
# `adapt_rgb` is handling gray-scale inputs.
assert_allclose(edges_each(GRAY_IMAGE), filters.sobel(GRAY_IMAGE))
def test_each_channel():
filtered = edges_each(COLOR_IMAGE)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))
assert_allclose(channel, expected)
def test_each_channel_with_filter_argument():
filtered = smooth_each(COLOR_IMAGE, SIGMA)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))
def test_hsv_value():
filtered = edges_hsv(COLOR_IMAGE)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], filters.sobel(value))
def test_hsv_value_with_filter_argument():
filtered = smooth_hsv(COLOR_IMAGE, SIGMA)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], smooth(value))
def test_hsv_value_with_non_float_output():
# Since `rgb2hsv` returns a float image and the result of the filtered
# result is inserted into the HSV image, we want to make sure there isn't
# a dtype mismatch.
filtered = edges_hsv_uint(COLOR_IMAGE)
filtered_value = color.rgb2hsv(filtered)[:, :, 2]
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
# Reduce tolerance because dtype conversion.
assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
| bsd-3-clause |
cyberden/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/collegehumor.py | 150 | 3638 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class CollegeHumorIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/(video|embed|e)/(?P<videoid>[0-9]+)/?(?P<shorttitle>.*)$'
_TESTS = [
{
'url': 'http://www.collegehumor.com/video/6902724/comic-con-cosplay-catastrophe',
'md5': 'dcc0f5c1c8be98dc33889a191f4c26bd',
'info_dict': {
'id': '6902724',
'ext': 'mp4',
'title': 'Comic-Con Cosplay Catastrophe',
'description': "Fans get creative this year at San Diego. Too creative. And yes, that's really Joss Whedon.",
'age_limit': 13,
'duration': 187,
},
}, {
'url': 'http://www.collegehumor.com/video/3505939/font-conference',
'md5': '72fa701d8ef38664a4dbb9e2ab721816',
'info_dict': {
'id': '3505939',
'ext': 'mp4',
'title': 'Font Conference',
'description': "This video wasn't long enough, so we made it double-spaced.",
'age_limit': 10,
'duration': 179,
},
}, {
# embedded youtube video
'url': 'http://www.collegehumor.com/embed/6950306',
'info_dict': {
'id': 'Z-bao9fg6Yc',
'ext': 'mp4',
'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
'uploader': 'Mark Dice',
'uploader_id': 'MarkDice',
'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
'upload_date': '20140127',
},
'params': {
'skip_download': True,
},
'add_ie': ['Youtube'],
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
jsonUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id + '.json'
data = json.loads(self._download_webpage(
jsonUrl, video_id, 'Downloading info JSON'))
vdata = data['video']
if vdata.get('youtubeId') is not None:
return {
'_type': 'url',
'url': vdata['youtubeId'],
'ie_key': 'Youtube',
}
AGE_LIMITS = {'nc17': 18, 'r': 18, 'pg13': 13, 'pg': 10, 'g': 0}
rating = vdata.get('rating')
if rating:
age_limit = AGE_LIMITS.get(rating.lower())
else:
age_limit = None # None = No idea
PREFS = {'high_quality': 2, 'low_quality': 0}
formats = []
for format_key in ('mp4', 'webm'):
for qname, qurl in vdata.get(format_key, {}).items():
formats.append({
'format_id': format_key + '_' + qname,
'url': qurl,
'format': format_key,
'preference': PREFS.get(qname),
})
self._sort_formats(formats)
duration = int_or_none(vdata.get('duration'), 1000)
like_count = int_or_none(vdata.get('likes'))
return {
'id': video_id,
'title': vdata['title'],
'description': vdata.get('description'),
'thumbnail': vdata.get('thumbnail'),
'formats': formats,
'age_limit': age_limit,
'duration': duration,
'like_count': like_count,
}
| gpl-3.0 |
wooga/airflow | tests/providers/amazon/aws/sensors/test_sagemaker_base.py | 1 | 4679 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.sensors.sagemaker_base import SageMakerBaseSensor
class TestSagemakerBaseSensor(unittest.TestCase):
def test_execute(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'COMPLETED'},
'ResponseMetadata': {'HTTPStatusCode': 200}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
sensor.execute(None)
def test_poke_with_unfinished_job(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'PENDING'},
'ResponseMetadata': {'HTTPStatusCode': 200}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertEqual(sensor.poke(None), False)
def test_poke_with_not_implemented_method(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertRaises(NotImplementedError, sensor.poke, None)
def test_poke_with_bad_response(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'COMPLETED'},
'ResponseMetadata': {'HTTPStatusCode': 400}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertEqual(sensor.poke(None), False)
def test_poke_with_job_failure(self):
class SageMakerBaseSensorSubclass(SageMakerBaseSensor):
def non_terminal_states(self):
return ['PENDING', 'RUNNING', 'CONTINUE']
def failed_states(self):
return ['FAILED']
def get_sagemaker_response(self):
return {
'SomeKey': {'State': 'FAILED'},
'ResponseMetadata': {'HTTPStatusCode': 200}
}
def state_from_response(self, response):
return response['SomeKey']['State']
sensor = SageMakerBaseSensorSubclass(
task_id='test_task',
poke_interval=2,
aws_conn_id='aws_test'
)
self.assertRaises(AirflowException, sensor.poke, None)
| apache-2.0 |
Timurdov/bionic | bionic/Lib/site-packages/django/db/backends/sqlite3/base.py | 52 | 23862 | """
SQLite3 backend for django.
Works with either the pysqlite2 module or the sqlite3 module in the
standard library.
"""
from __future__ import unicode_literals
import datetime
import decimal
import warnings
import re
from django.conf import settings
from django.db import utils
from django.db.backends import (utils as backend_utils, BaseDatabaseFeatures,
BaseDatabaseOperations, BaseDatabaseWrapper, BaseDatabaseValidation)
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.models import fields
from django.db.models.sql import aggregates
from django.utils.dateparse import parse_date, parse_datetime, parse_time
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes
from django.utils import six
from django.utils import timezone
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError:
from sqlite3 import dbapi2 as Database
except ImportError as exc:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
try:
import pytz
except ImportError:
pytz = None
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
def parse_datetime_with_timezone_support(value):
dt = parse_datetime(value)
# Confirm that dt is naive before overwriting its tzinfo.
if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
dt = dt.replace(tzinfo=timezone.utc)
return dt
def adapt_datetime_with_timezone_support(value):
# Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
if settings.USE_TZ:
if timezone.is_naive(value):
warnings.warn("SQLite received a naive datetime (%s)"
" while time zone support is active." % value,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
value = value.astimezone(timezone.utc).replace(tzinfo=None)
return value.isoformat(str(" "))
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
return lambda s: conv_func(s.decode('utf-8'))
Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
Database.register_converter(str("time"), decoder(parse_time))
Database.register_converter(str("date"), decoder(parse_date))
Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
Database.register_converter(str("decimal"), decoder(backend_utils.typecast_decimal))
Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
if six.PY2:
Database.register_adapter(str, lambda s: s.decode('utf-8'))
Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_column_check_constraints = False
autocommits_when_autocommit_is_off = True
can_introspect_decimal_field = False
can_introspect_positive_integer_field = True
can_introspect_small_integer_field = True
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
supports_paramstyle_pyformat = False
supports_sequence_reset = False
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
@cached_property
def has_zoneinfo_database(self):
return pytz is not None
class DatabaseOperations(BaseDatabaseOperations):
def bulk_batch_size(self, fields, objs):
"""
SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
999 variables per query.
If there is just single field to insert, then we can hit another
limit, SQLITE_MAX_COMPOUND_SELECT which defaults to 500.
"""
limit = 999 if len(fields) > 1 else 500
return (limit // len(fields)) if len(fields) > 0 else len(objs)
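    # Worked example (illustrative, not part of Django): with 3 fields per row
    # the 999-variable limit dominates, giving 999 // 3 == 333 rows per batch;
    # with a single field the SQLITE_MAX_COMPOUND_SELECT limit applies instead,
    # allowing 500 rows per batch.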
def check_aggregate_support(self, aggregate):
bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
bad_aggregates = (aggregates.Sum, aggregates.Avg,
aggregates.Variance, aggregates.StdDev)
if (isinstance(aggregate.source, bad_fields) and
isinstance(aggregate, bad_aggregates)):
raise NotImplementedError(
'You cannot use Sum, Avg, StdDev and Variance aggregations '
'on date/time fields in sqlite3 '
'since date/time is saved as text.')
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_extract('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
if settings.USE_TZ:
if pytz is None:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("This query requires pytz, "
"but it isn't installed.")
return "django_datetime_trunc('%s', %s, %%s)" % (
lookup_type.lower(), field_name), [tzname]
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return "NULL"
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def value_to_db_datetime(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
# SQLite doesn't support tz-aware datetimes
if timezone.is_aware(value):
raise ValueError("SQLite backend does not support timezone-aware times.")
return six.text_type(value)
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
if value is None:
return None
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return backend_utils.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return parse_date(value)
elif internal_type == 'DateTimeField':
return parse_datetime_with_timezone_support(value)
elif internal_type == 'TimeField':
return parse_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
def bulk_insert_sql(self, fields, num_values):
res = []
res.append("SELECT %s" % ", ".join(
"%%s AS %s" % self.quote_name(f.column) for f in fields
))
res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
return " ".join(res)
def combine_expression(self, connector, sub_expressions):
# SQLite doesn't have a power function, so we fake it with a
# user-defined function django_power that's registered in connect().
if connector == '^':
return 'django_power(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def integer_field_range(self, internal_type):
# SQLite doesn't enforce any integer constraints
return (None, None)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
pattern_ops = {
'startswith': "LIKE %s || '%%%%'",
'istartswith': "LIKE UPPER(%s) || '%%%%'",
}
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
# Always allow the underlying SQLite connection to be shareable
# between multiple threads. The safe-guarding will be handled at a
# higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
# property. This is necessary as the shareability is disabled by
# default in pysqlite and it cannot be changed once a connection is
# opened.
if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
warnings.warn(
'The `check_same_thread` option was provided and set to '
'True. It will be overridden with False. Use the '
'`DatabaseWrapper.allow_thread_sharing` property instead '
'for controlling thread shareability.',
RuntimeWarning
)
kwargs.update({'check_same_thread': False})
return kwargs
def get_new_connection(self, conn_params):
conn = Database.connect(**conn_params)
conn.create_function("django_date_extract", 2, _sqlite_date_extract)
conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
conn.create_function("regexp", 2, _sqlite_regexp)
conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
conn.create_function("django_power", 2, _sqlite_power)
return conn
def init_connection_state(self):
pass
def create_cursor(self):
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
self.validate_thread_sharing()
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# Two conditions are required here:
# - A sufficiently recent version of SQLite to support savepoints,
# - Being in a transaction, which can only happen inside 'atomic'.
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside 'atomic'
# blocks. To work around that bug, on SQLite, 'atomic' starts a
# transaction explicitly rather than simply disable autocommit.
return self.features.uses_savepoints and self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
with self.wrap_database_errors:
self.connection.isolation_level = level
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign key references. This method is
intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint checks were off.
Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
detailed information about the invalid reference in the error message.
Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE")
"""
cursor = self.cursor()
if table_names is None:
table_names = self.introspection.table_names(cursor)
for table_name in table_names:
primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
if not primary_key_column_name:
continue
key_columns = self.introspection.get_key_columns(cursor, table_name)
for column_name, referenced_table_name, referenced_column_name in key_columns:
cursor.execute("""
SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
LEFT JOIN `%s` as REFERRED
ON (REFERRING.`%s` = REFERRED.`%s`)
WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
% (primary_key_column_name, column_name, table_name, referenced_table_name,
column_name, referenced_column_name, column_name, referenced_column_name))
for bad_row in cursor.fetchall():
raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
"foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
% (table_name, bad_row[0], table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
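    # Typical usage sketch for check_constraints (assumed, not taken from this
    # module):
    #   connection.disable_constraint_checking()
    #   ... perform writes that may temporarily violate foreign keys ...
    #   connection.enable_constraint_checking()
    #   connection.check_constraints(table_names=['myapp_author'])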
def is_usable(self):
return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
def schema_editor(self, *args, **kwargs):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self, *args, **kwargs)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return Database.Cursor.execute(self, query)
query = self.convert_query(query)
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
query = self.convert_query(query)
return Database.Cursor.executemany(self, query, param_list)
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
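    # Example of the placeholder rewrite (illustrative):
    #   convert_query("UPDATE t SET a = %s WHERE b LIKE '%%x%%'")
    #   -> "UPDATE t SET a = ? WHERE b LIKE '%x%'"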
def _sqlite_date_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
def _sqlite_datetime_extract(lookup_type, dt, tzname):
if dt is None:
return None
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
try:
dt = backend_utils.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if tzname is not None:
dt = timezone.localtime(dt, pytz.timezone(tzname))
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
elif lookup_type == 'hour':
return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
elif lookup_type == 'minute':
return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
elif lookup_type == 'second':
return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = backend_utils.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
# typecast_timestamp returns a date or a datetime without timezone.
# It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
return str(dt)
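# Worked example (illustrative, not part of Django): adding one day and one hour,
#   _sqlite_format_dtdelta('2011-01-01 10:00:00', '+', 1, 3600, 0)
# returns '2011-01-02 11:00:00'.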
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
| apache-2.0 |
PeterMosmans/letsencrypt | letsencrypt/tests/auth_handler_test.py | 4 | 18143 | """Tests for letsencrypt.auth_handler."""
import functools
import logging
import unittest
import mock
from acme import challenges
from acme import client as acme_client
from acme import messages
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt.tests import acme_util
TRANSLATE = {
"dvsni": "DVSNI",
"simpleHttp": "SimpleHTTP",
"dns": "DNS",
"recoveryToken": "RecoveryToken",
"recoveryContact": "RecoveryContact",
"proofOfPossession": "ProofOfPossession",
}
class ChallengeFactoryTest(unittest.TestCase):
# pylint: disable=protected-access
def setUp(self):
from letsencrypt.auth_handler import AuthHandler
# Account is mocked...
self.handler = AuthHandler(
None, None, None, mock.Mock(key="mock_key"))
self.dom = "test"
self.handler.authzr[self.dom] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.dom, acme_util.CHALLENGES,
[messages.STATUS_PENDING]*6, False)
def test_all(self):
cont_c, dv_c = self.handler._challenge_factory(self.dom, range(0, 6))
self.assertEqual(
[achall.chall for achall in cont_c], acme_util.CONT_CHALLENGES)
self.assertEqual(
[achall.chall for achall in dv_c], acme_util.DV_CHALLENGES)
def test_one_dv_one_cont(self):
cont_c, dv_c = self.handler._challenge_factory(self.dom, [1, 4])
self.assertEqual(
[achall.chall for achall in cont_c], [acme_util.RECOVERY_TOKEN])
self.assertEqual([achall.chall for achall in dv_c], [acme_util.DVSNI])
def test_unrecognized(self):
self.handler.authzr["failure.com"] = acme_util.gen_authzr(
messages.STATUS_PENDING, "failure.com",
[mock.Mock(chall="chall", typ="unrecognized")],
[messages.STATUS_PENDING])
self.assertRaises(
errors.Error, self.handler._challenge_factory, "failure.com", [0])
class GetAuthorizationsTest(unittest.TestCase):
"""get_authorizations test.
This tests everything except for all functions under _poll_challenges.
"""
def setUp(self):
from letsencrypt.auth_handler import AuthHandler
self.mock_dv_auth = mock.MagicMock(name="ApacheConfigurator")
self.mock_cont_auth = mock.MagicMock(name="ContinuityAuthenticator")
self.mock_dv_auth.get_chall_pref.return_value = [challenges.DVSNI]
self.mock_cont_auth.get_chall_pref.return_value = [
challenges.RecoveryToken]
self.mock_cont_auth.perform.side_effect = gen_auth_resp
self.mock_dv_auth.perform.side_effect = gen_auth_resp
self.mock_account = mock.Mock(key=le_util.Key("file_path", "PEM"))
self.mock_net = mock.MagicMock(spec=acme_client.Client)
self.handler = AuthHandler(
self.mock_dv_auth, self.mock_cont_auth,
self.mock_net, self.mock_account)
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@mock.patch("letsencrypt.auth_handler.AuthHandler._poll_challenges")
def test_name1_dvsni1(self, mock_poll):
self.mock_net.request_domain_challenges.side_effect = functools.partial(
gen_dom_authzr, challs=acme_util.DV_CHALLENGES)
mock_poll.side_effect = self._validate_all
authzr = self.handler.get_authorizations(["0"])
self.assertEqual(self.mock_net.answer_challenge.call_count, 1)
self.assertEqual(mock_poll.call_count, 1)
chall_update = mock_poll.call_args[0][0]
self.assertEqual(chall_update.keys(), ["0"])
self.assertEqual(len(chall_update.values()), 1)
self.assertEqual(self.mock_dv_auth.cleanup.call_count, 1)
self.assertEqual(self.mock_cont_auth.cleanup.call_count, 0)
# Test if list first element is DVSNI, use typ because it is an achall
self.assertEqual(
self.mock_dv_auth.cleanup.call_args[0][0][0].typ, "dvsni")
self.assertEqual(len(authzr), 1)
@mock.patch("letsencrypt.auth_handler.AuthHandler._poll_challenges")
def test_name3_dvsni3_rectok_3(self, mock_poll):
self.mock_net.request_domain_challenges.side_effect = functools.partial(
gen_dom_authzr, challs=acme_util.CHALLENGES)
mock_poll.side_effect = self._validate_all
authzr = self.handler.get_authorizations(["0", "1", "2"])
self.assertEqual(self.mock_net.answer_challenge.call_count, 6)
# Check poll call
self.assertEqual(mock_poll.call_count, 1)
chall_update = mock_poll.call_args[0][0]
self.assertEqual(len(chall_update.keys()), 3)
self.assertTrue("0" in chall_update.keys())
self.assertEqual(len(chall_update["0"]), 2)
self.assertTrue("1" in chall_update.keys())
self.assertEqual(len(chall_update["1"]), 2)
self.assertTrue("2" in chall_update.keys())
self.assertEqual(len(chall_update["2"]), 2)
self.assertEqual(self.mock_dv_auth.cleanup.call_count, 1)
self.assertEqual(self.mock_cont_auth.cleanup.call_count, 1)
self.assertEqual(len(authzr), 3)
def test_perform_failure(self):
self.mock_net.request_domain_challenges.side_effect = functools.partial(
gen_dom_authzr, challs=acme_util.CHALLENGES)
self.mock_dv_auth.perform.side_effect = errors.AuthorizationError
self.assertRaises(
errors.AuthorizationError, self.handler.get_authorizations, ["0"])
def _validate_all(self, unused_1, unused_2):
for dom in self.handler.authzr.keys():
azr = self.handler.authzr[dom]
self.handler.authzr[dom] = acme_util.gen_authzr(
messages.STATUS_VALID,
dom,
[challb.chall for challb in azr.body.challenges],
[messages.STATUS_VALID]*len(azr.body.challenges),
azr.body.combinations)
class PollChallengesTest(unittest.TestCase):
# pylint: disable=protected-access
"""Test poll challenges."""
def setUp(self):
from letsencrypt.auth_handler import challb_to_achall
from letsencrypt.auth_handler import AuthHandler
# Account and network are mocked...
self.mock_net = mock.MagicMock()
self.handler = AuthHandler(
None, None, self.mock_net, mock.Mock(key="mock_key"))
self.doms = ["0", "1", "2"]
self.handler.authzr[self.doms[0]] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.doms[0],
acme_util.DV_CHALLENGES, [messages.STATUS_PENDING]*3, False)
self.handler.authzr[self.doms[1]] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.doms[1],
acme_util.DV_CHALLENGES, [messages.STATUS_PENDING]*3, False)
self.handler.authzr[self.doms[2]] = acme_util.gen_authzr(
messages.STATUS_PENDING, self.doms[2],
acme_util.DV_CHALLENGES, [messages.STATUS_PENDING]*3, False)
self.chall_update = {}
for dom in self.doms:
self.chall_update[dom] = [
challb_to_achall(challb, "dummy_key", dom)
for challb in self.handler.authzr[dom].body.challenges]
@mock.patch("letsencrypt.auth_handler.time")
def test_poll_challenges(self, unused_mock_time):
self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
self.handler._poll_challenges(self.chall_update, False)
for authzr in self.handler.authzr.values():
self.assertEqual(authzr.body.status, messages.STATUS_VALID)
@mock.patch("letsencrypt.auth_handler.time")
def test_poll_challenges_failure_best_effort(self, unused_mock_time):
self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
self.handler._poll_challenges(self.chall_update, True)
for authzr in self.handler.authzr.values():
self.assertEqual(authzr.body.status, messages.STATUS_PENDING)
@mock.patch("letsencrypt.auth_handler.time")
@mock.patch("letsencrypt.auth_handler.zope.component.getUtility")
def test_poll_challenges_failure(self, unused_mock_time, unused_mock_zope):
self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
self.assertRaises(
errors.AuthorizationError, self.handler._poll_challenges,
self.chall_update, False)
@mock.patch("letsencrypt.auth_handler.time")
def test_unable_to_find_challenge_status(self, unused_mock_time):
from letsencrypt.auth_handler import challb_to_achall
self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
self.chall_update[self.doms[0]].append(
challb_to_achall(acme_util.RECOVERY_CONTACT_P, "key", self.doms[0]))
self.assertRaises(
errors.AuthorizationError, self.handler._poll_challenges,
self.chall_update, False)
def test_verify_authzr_failure(self):
self.assertRaises(
errors.AuthorizationError, self.handler.verify_authzr_complete)
def _mock_poll_solve_one_valid(self, authzr):
# Pending here because my dummy script won't change the full status.
# Basically it didn't raise an error and it stopped earlier than
# Making all challenges invalid which would make mock_poll_solve_one
# change authzr to invalid
return self._mock_poll_solve_one_chall(authzr, messages.STATUS_VALID)
def _mock_poll_solve_one_invalid(self, authzr):
return self._mock_poll_solve_one_chall(authzr, messages.STATUS_INVALID)
def _mock_poll_solve_one_chall(self, authzr, desired_status):
# pylint: disable=no-self-use
"""Dummy method that solves one chall at a time to desired_status.
When all are solved.. it changes authzr.status to desired_status
"""
new_challbs = authzr.body.challenges
for challb in authzr.body.challenges:
if challb.status != desired_status:
new_challbs = tuple(
challb_temp if challb_temp != challb
else acme_util.chall_to_challb(challb.chall, desired_status)
for challb_temp in authzr.body.challenges
)
break
if all(test_challb.status == desired_status
for test_challb in new_challbs):
status_ = desired_status
else:
status_ = authzr.body.status
new_authzr = messages.AuthorizationResource(
uri=authzr.uri,
new_cert_uri=authzr.new_cert_uri,
body=messages.Authorization(
identifier=authzr.body.identifier,
challenges=new_challbs,
combinations=authzr.body.combinations,
status=status_,
),
)
return (new_authzr, "response")
class GenChallengePathTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler.gen_challenge_path.
.. todo:: Add more tests for dumb_path... depending on what we want to do.
"""
def setUp(self):
        logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
@classmethod
def _call(cls, challbs, preferences, combinations):
from letsencrypt.auth_handler import gen_challenge_path
return gen_challenge_path(challbs, preferences, combinations)
def test_common_case(self):
"""Given DVSNI and SimpleHTTP with appropriate combos."""
challbs = (acme_util.DVSNI_P, acme_util.SIMPLE_HTTP_P)
prefs = [challenges.DVSNI]
combos = ((0,), (1,))
# Smart then trivial dumb path test
self.assertEqual(self._call(challbs, prefs, combos), (0,))
self.assertTrue(self._call(challbs, prefs, None))
# Rearrange order...
self.assertEqual(self._call(challbs[::-1], prefs, combos), (1,))
self.assertTrue(self._call(challbs[::-1], prefs, None))
def test_common_case_with_continuity(self):
challbs = (acme_util.RECOVERY_TOKEN_P,
acme_util.RECOVERY_CONTACT_P,
acme_util.DVSNI_P,
acme_util.SIMPLE_HTTP_P)
prefs = [challenges.RecoveryToken, challenges.DVSNI]
combos = acme_util.gen_combos(challbs)
self.assertEqual(self._call(challbs, prefs, combos), (0, 2))
# dumb_path() trivial test
self.assertTrue(self._call(challbs, prefs, None))
def test_full_cont_server(self):
challbs = (acme_util.RECOVERY_TOKEN_P,
acme_util.RECOVERY_CONTACT_P,
acme_util.POP_P,
acme_util.DVSNI_P,
acme_util.SIMPLE_HTTP_P,
acme_util.DNS_P)
# Typical webserver client that can do everything except DNS
# Attempted to make the order realistic
prefs = [challenges.RecoveryToken,
challenges.ProofOfPossession,
challenges.SimpleHTTP,
challenges.DVSNI,
challenges.RecoveryContact]
combos = acme_util.gen_combos(challbs)
self.assertEqual(self._call(challbs, prefs, combos), (0, 4))
# Dumb path trivial test
self.assertTrue(self._call(challbs, prefs, None))
def test_not_supported(self):
challbs = (acme_util.POP_P, acme_util.DVSNI_P)
prefs = [challenges.DVSNI]
combos = ((0, 1),)
self.assertRaises(
errors.AuthorizationError, self._call, challbs, prefs, combos)
class MutuallyExclusiveTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler.mutually_exclusive."""
# pylint: disable=invalid-name,missing-docstring,too-few-public-methods
class A(object):
pass
class B(object):
pass
class C(object):
pass
class D(C):
pass
@classmethod
def _call(cls, chall1, chall2, different=False):
from letsencrypt.auth_handler import mutually_exclusive
return mutually_exclusive(chall1, chall2, groups=frozenset([
frozenset([cls.A, cls.B]), frozenset([cls.A, cls.C]),
]), different=different)
def test_group_members(self):
self.assertFalse(self._call(self.A(), self.B()))
self.assertFalse(self._call(self.A(), self.C()))
def test_cross_group(self):
self.assertTrue(self._call(self.B(), self.C()))
def test_same_type(self):
self.assertFalse(self._call(self.A(), self.A(), different=False))
self.assertTrue(self._call(self.A(), self.A(), different=True))
# in particular...
obj = self.A()
self.assertFalse(self._call(obj, obj, different=False))
self.assertTrue(self._call(obj, obj, different=True))
def test_subclass(self):
self.assertFalse(self._call(self.A(), self.D()))
self.assertFalse(self._call(self.D(), self.A()))
class IsPreferredTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler.is_preferred."""
@classmethod
def _call(cls, chall, satisfied):
from letsencrypt.auth_handler import is_preferred
return is_preferred(chall, satisfied, exclusive_groups=frozenset([
frozenset([challenges.DVSNI, challenges.SimpleHTTP]),
frozenset([challenges.DNS, challenges.SimpleHTTP]),
]))
def test_empty_satisfied(self):
self.assertTrue(self._call(acme_util.DNS_P, frozenset()))
    def test_mutually_exclusive(self):
self.assertFalse(
self._call(
acme_util.DVSNI_P, frozenset([acme_util.SIMPLE_HTTP_P])))
def test_mutually_exclusive_same_type(self):
self.assertTrue(
self._call(acme_util.DVSNI_P, frozenset([acme_util.DVSNI_P])))
class ReportFailedChallsTest(unittest.TestCase):
"""Tests for letsencrypt.auth_handler._report_failed_challs."""
# pylint: disable=protected-access
def setUp(self):
from letsencrypt import achallenges
kwargs = {
"chall" : acme_util.SIMPLE_HTTP,
"uri": "uri",
"status": messages.STATUS_INVALID,
"error": messages.Error(typ="tls", detail="detail"),
}
self.simple_http = achallenges.SimpleHTTP(
challb=messages.ChallengeBody(**kwargs),# pylint: disable=star-args
domain="example.com",
key=acme_util.KEY)
kwargs["chall"] = acme_util.DVSNI
self.dvsni_same = achallenges.DVSNI(
challb=messages.ChallengeBody(**kwargs),# pylint: disable=star-args
domain="example.com",
key=acme_util.KEY)
kwargs["error"] = messages.Error(typ="dnssec", detail="detail")
self.dvsni_diff = achallenges.DVSNI(
challb=messages.ChallengeBody(**kwargs),# pylint: disable=star-args
domain="foo.bar",
key=acme_util.KEY)
@mock.patch("letsencrypt.auth_handler.zope.component.getUtility")
def test_same_error_and_domain(self, mock_zope):
from letsencrypt import auth_handler
auth_handler._report_failed_challs([self.simple_http, self.dvsni_same])
call_list = mock_zope().add_message.call_args_list
self.assertTrue(len(call_list) == 1)
self.assertTrue("Domains: example.com\n" in call_list[0][0][0])
@mock.patch("letsencrypt.auth_handler.zope.component.getUtility")
def test_different_errors_and_domains(self, mock_zope):
from letsencrypt import auth_handler
auth_handler._report_failed_challs([self.simple_http, self.dvsni_diff])
self.assertTrue(mock_zope().add_message.call_count == 2)
def gen_auth_resp(chall_list):
"""Generate a dummy authorization response."""
return ["%s%s" % (chall.__class__.__name__, chall.domain)
for chall in chall_list]
def gen_dom_authzr(domain, unused_new_authzr_uri, challs):
"""Generates new authzr for domains."""
return acme_util.gen_authzr(
messages.STATUS_PENDING, domain, challs,
[messages.STATUS_PENDING]*len(challs))
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
aospx-kitkat/platform_external_chromium_org | ui/resources/resource_check/resource_scale_factors.py | 41 | 4151 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium browser resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
import os
import struct
class ResourceScaleFactors(object):
"""Verifier of image dimensions for Chromium resources.
This class verifies the image dimensions of resources in the various
resource subdirectories.
Attributes:
paths: An array of tuples giving the folders to check and their
relevant scale factors. For example:
[(100, 'default_100_percent'), (200, 'default_200_percent')]
"""
def __init__(self, input_api, output_api, paths):
""" Initializes ResourceScaleFactors with paths."""
self.input_api = input_api
self.output_api = output_api
self.paths = paths
def RunChecks(self):
"""Verifies the scale factors of resources being added or modified.
Returns:
An array of presubmit errors if any images were detected not
having the correct dimensions.
"""
def ImageSize(filename):
with open(filename, 'rb', buffering=0) as f:
data = f.read(24)
assert data[:8] == '\x89PNG\r\n\x1A\n' and data[12:16] == 'IHDR'
return struct.unpack('>ii', data[16:24])
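# A PNG file begins with the 8-byte signature '\x89PNG\r\n\x1a\n'; bytes 12-16
# hold the ASCII chunk type 'IHDR' and bytes 16-24 hold the image width and
# height as big-endian 32-bit integers, which is what '>ii' unpacks above.
# For example, a 640x480 PNG makes ImageSize return (640, 480).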
# Returns a list of valid scaled image sizes. The valid sizes are the
# floor and ceiling of (base_size * scale_percent / 100). This is equivalent
# to requiring that the actual scaled size is less than one pixel away from
# the exact scaled size.
def ValidSizes(base_size, scale_percent):
return sorted(set([(base_size * scale_percent) / 100,
(base_size * scale_percent + 99) / 100]))
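# Worked example (illustrative, using Python 2 integer division):
# ValidSizes(7, 150) computes (7*150)/100 = 10 and (7*150+99)/100 = 11,
# giving [10, 11]; ValidSizes(24, 125) gives just [30], because the floor
# and ceiling coincide at the exact scaled size of 30.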
repository_path = self.input_api.os_path.relpath(
self.input_api.PresubmitLocalPath(),
self.input_api.change.RepositoryRoot())
results = []
# Check for affected files in any of the paths specified.
affected_files = self.input_api.AffectedFiles(include_deletes=False)
files = []
for f in affected_files:
for path_spec in self.paths:
path_root = self.input_api.os_path.join(
repository_path, path_spec[1])
if (f.LocalPath().endswith('.png') and
f.LocalPath().startswith(path_root)):
# Only save the relative path from the resource directory.
relative_path = self.input_api.os_path.relpath(f.LocalPath(),
path_root)
if relative_path not in files:
files.append(relative_path)
for f in files:
base_image = self.input_api.os_path.join(self.paths[0][1], f)
if not os.path.exists(base_image):
results.append(self.output_api.PresubmitError(
'Base image %s does not exist' % self.input_api.os_path.join(
repository_path, base_image)))
continue
base_dimensions = ImageSize(base_image)
# Find all scaled versions of the base image and verify their sizes.
for i in range(1, len(self.paths)):
image_path = self.input_api.os_path.join(self.paths[i][1], f)
if not os.path.exists(image_path):
continue
# Ensure that each image for a particular scale factor is the
# correct scale of the base image.
scaled_dimensions = ImageSize(image_path)
for dimension_name, base_size, scaled_size in zip(
('width', 'height'), base_dimensions, scaled_dimensions):
valid_sizes = ValidSizes(base_size, self.paths[i][0])
if scaled_size not in valid_sizes:
results.append(self.output_api.PresubmitError(
'Image %s has %s %d, expected to be %s' % (
self.input_api.os_path.join(repository_path, image_path),
dimension_name,
scaled_size,
' or '.join(map(str, valid_sizes)))))
return results
| bsd-3-clause |
Panos512/invenio | modules/websubmit/lib/websubmitadmin_engine.py | 25 | 240715 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
import re
from os.path import split, basename, isfile
from os import access, F_OK, R_OK, getpid, rename, unlink
from time import strftime, localtime
from invenio.websubmitadmin_dblayer import *
from invenio.websubmitadmin_config import *
from invenio.websubmit_config import CFG_RESERVED_SUBMISSION_FILENAMES
from invenio.access_control_admin import acc_get_all_roles, acc_get_role_users, acc_delete_user_role
from invenio.config import CFG_SITE_LANG, CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.websubmitadmin_config import InvenioWebSubmitWarning
from invenio.messages import gettext_set_language
import invenio.template
try:
websubmitadmin_templates = invenio.template.load('websubmitadmin')
except:
pass
## utility functions:
def is_adminuser(req, role):
"""check if user is a registered administrator. """
return acc_authorize_action(req, role)
def check_user(req, role, adminarea=2, authorized=0):
(auth_code, auth_message) = is_adminuser(req, role)
if not authorized and auth_code != 0:
return ("false", auth_message)
return ("", auth_message)
def get_navtrail(ln=CFG_SITE_LANG):
"""gets the navtrail for title...
@param title: title of the page
@param ln: language
@return: HTML output
"""
navtrail = websubmitadmin_templates.tmpl_navtrail(ln)
return navtrail
def stringify_listvars(mylist):
"""Accept a list (or a list of lists) (or tuples).
Convert each item in the list, into a string (replace None with the empty
string "").
@param mylist: A list/tuple of values, or a list/tuple of value list/tuples.
@return: a tuple of string values or a tuple of string value tuples
"""
string_list = []
try:
if type(mylist[0]) in (tuple,list):
for row in mylist:
string_list.append(map(lambda x: x is not None and str(x) or "", row))
else:
string_list = map(lambda x: x is not None and str(x) or "", mylist)
except IndexError:
pass
return string_list
def save_update_to_file(filepath, filecontent, notruncate=0, appendmode=0):
"""Save a string value to a file.
Save will create a new file if the file does not exist. Mode can be set to truncate an older file
or to refuse to create the file if it already exists. There is also a mode to "append" the string value
to a file.
@param filepath: (string) the full path to the file
@param filecontent: (string) the content to be written to the file
@param notruncate: (integer) should be 1 or 0, defaults to 0 (ZERO). If 0, existing file will be truncated;
if 1, file will not be written if it already exists
@param appendmode: (integer) should be 1 or 0, defaults to 0 (ZERO). If 1, data will be appended to the file
if it exists; if 0, file will be truncated (or not, depending on the notruncate mode) by new data.
@return: None
@exceptions raised:
- InvenioWebSubmitAdminWarningIOError: when operations involving writing to file failed.
"""
## sanity checking:
if notruncate not in (0, 1):
notruncate = 0
if appendmode not in (0, 1):
appendmode = 0
(fpath, fname) = split(filepath)
if fname == "":
## error opening file
msg = """Unable to open filepath [%s] - couldn't determine a valid filename""" % (filepath,)
raise InvenioWebSubmitAdminWarningIOError(msg)
## if fpath is not empty, append the trailing "/":
if fpath != "":
fpath += "/"
if appendmode == 0:
if notruncate != 0 and access("%s%s" % (fpath, fname), F_OK):
## in no-truncate mode, but file already exists!
msg = """Unable to write to file [%s] in "no-truncate mode" because file already exists"""\
% (fname,)
raise InvenioWebSubmitAdminWarningIOError(msg)
## file already exists, make temporary file first, then move it later
tmpfname = "%s_%s_%s" % (fname, strftime("%Y%m%d%H%M%S", localtime()), getpid())
## open temp file for writing:
try:
fp = open("%s%s" % (fpath, tmpfname), "w")
except IOError, e:
## cannot open file
msg = """Unable to write to file [%s%s] - cannot open file for writing""" % (fpath, fname)
raise InvenioWebSubmitAdminWarningIOError(msg)
## write contents to temp file:
try:
fp.write(filecontent)
fp.flush()
fp.close()
except IOError, e:
## could not write to temp file
msg = """Unable to write to file [%s]""" % (tmpfname,)
## remove the "temp file"
try:
fp.close()
unlink("%s%s" % (fpath, tmpfname))
except IOError:
pass
raise InvenioWebSubmitAdminWarningIOError(msg)
## rename temp file to final filename:
try:
rename("%s%s" % (fpath, tmpfname), "%s%s" % (fpath, fname))
except OSError:
## couldn't rename the tmp file to the final file name
msg = """Unable to write to file [%s] - created temporary file [%s], but could not then rename it to [%s]"""\
% (fname, tmpfname, fname)
raise InvenioWebSubmitAdminWarningIOError(msg)
else:
## append mode:
try:
fp = open("%s%s" % (fpath, fname), "a")
except IOError, e:
## cannot open file
msg = """Unable to write to file [%s] - cannot open file for writing in append mode""" % (fname,)
raise InvenioWebSubmitAdminWarningIOError(msg)
## write contents to temp file:
try:
fp.write(filecontent)
fp.flush()
fp.close()
except IOError, e:
## could not write to temp file
msg = """Unable to write to file [%s] in append mode""" % (fname,)
## close the file
try:
fp.close()
except IOError:
pass
raise InvenioWebSubmitAdminWarningIOError(msg)
return
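## Illustrative usage sketch for save_update_to_file (the path below is
## hypothetical, purely for illustration):
##   save_update_to_file("/tmp/demo.txt", "first line\n")                 # create or truncate
##   save_update_to_file("/tmp/demo.txt", "second line\n", appendmode=1)  # append to existing file
##   save_update_to_file("/tmp/demo.txt", "x", notruncate=1)              # raises
##   InvenioWebSubmitAdminWarningIOError, because the file already exists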
def string_is_alphanumeric_including_underscore(txtstring):
p_txtstring = re.compile(r'^\w*$')
m_txtstring = p_txtstring.search(txtstring)
if m_txtstring is not None:
return 1
else:
return 0
def function_name_is_valid(fname):
p_fname = re.compile(r'^(_|[a-zA-Z])\w*$')
m_fname = p_fname.search(fname)
if m_fname is not None:
return 1
else:
return 0
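## Examples of what the two validators above accept (illustrative):
##   string_is_alphanumeric_including_underscore: "SBI_2" -> 1, "a b" -> 0
##   function_name_is_valid: "_my_func1" -> 1, "Move_to_Done" -> 1,
##   "2cool" -> 0 (may not start with a digit), "my-func" -> 0 (hyphen not allowed)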
def wash_single_urlarg(urlarg, argreqdtype, argdefault, maxstrlen=None, minstrlen=None, truncatestr=0):
"""Wash a single argument according to some specifications.
@param urlarg: the argument to be tested, as passed from the form/url, etc
@param argreqdtype: (a python type) the type that the argument should conform to (argument required
type)
@param argdefault: the default value that should be returned for the argument in the case that it
doesn't comply with the washing specifications
@param maxstrlen: (integer) the maximum length for a string argument; defaults to None, which means
that no maximum length is forced upon the string
@param minstrlen: (integer) the minimum length for a string argument; defaults to None, which means
that no minimum length is forced upon the string
@param truncatestr: (integer) should be 1 or 0 (ZERO). A flag used to determine whether or not a string
argument that overstretches the maximum length (if one is provided) should be truncated, or reset
to the default for the argument. 0 means don't truncate and reset the argument; 1 means truncate
the string.
@return: the washed argument
@exceptions raised:
- ValueError: when it is not possible to cast an argument to the type passed as argreqdtype
"""
## sanity checking:
if maxstrlen is not None and type(maxstrlen) is not int:
maxstrlen = None
elif type(maxstrlen) is int and maxstrlen < 1:
maxstrlen = None
if minstrlen is not None and type(minstrlen) is not int:
minstrlen = None
elif type(minstrlen) is int and minstrlen < 1:
minstrlen = None
result = ""
arg_dst_type = argreqdtype
## if no urlarg, return the default for that argument:
if urlarg is None:
result = argdefault
return result
## get the type of the argument passed:
arg_src_type = type(urlarg)
value = urlarg
# First, handle the case where we want all the results. In
# this case, we need to ensure all the elements are strings,
# and not Field instances.
if arg_src_type in (list, tuple):
if arg_dst_type is list:
result = [str(x) for x in value]
return result
if arg_dst_type is tuple:
result = tuple([str(x) for x in value])
return result
# in all the other cases, we are only interested in the
# first value.
value = value[0]
# Maybe we already have what is expected? Then don't change
# anything.
if arg_src_type is arg_dst_type:
result = value
if arg_dst_type is str and maxstrlen is not None and len(result) > maxstrlen:
if truncatestr != 0:
result = result[0:maxstrlen]
else:
result = argdefault
elif arg_dst_type is str and minstrlen is not None and len(result) < minstrlen:
result = argdefault
return result
if arg_dst_type in (str, int):
try:
result = arg_dst_type(value)
if arg_dst_type is str and maxstrlen is not None and len(result) > maxstrlen:
if truncatestr != 0:
result = result[0:maxstrlen]
else:
result = argdefault
elif arg_dst_type is str and minstrlen is not None and len(result) < minstrlen:
result = argdefault
except:
result = argdefault
elif arg_dst_type is tuple:
result = (value,)
elif arg_dst_type is list:
result = [value]
elif arg_dst_type is dict:
result = {0: str(value)}
else:
raise ValueError('cannot cast form argument into type %r' % (arg_dst_type,))
return result
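## Illustrative behaviour sketch of wash_single_urlarg:
##   wash_single_urlarg("ABC", str, "", maxstrlen=3, minstrlen=3) -> "ABC"
##   wash_single_urlarg("TOOLONG", str, "", maxstrlen=3, truncatestr=1) -> "TOO"
##   wash_single_urlarg("12", int, 0) -> 12
##   wash_single_urlarg(None, int, 0) -> 0 (the default is returned)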
## Internal Business-Logic functions
## Functions for managing collection order, etc:
def build_submission_collection_tree(collection_id, has_brother_above=0, has_brother_below=0):
## get the name of this collection:
collection_name = get_collection_name(collection_id)
if collection_name is None:
collection_name = "Unknown Collection"
## make a data-structure containing the details of the collection:
collection_node = { 'collection_id' : collection_id, ## collection ID
'collection_name' : collection_name, ## collection Name
'collection_children' : [], ## list of 'collection' children nodes
'doctype_children' : [], ## list of 'doctype' children
'has_brother_above' : has_brother_above, ## has a sibling collection above in score
'has_brother_below' : has_brother_below, ## has a sibling collection below in score
}
## get the IDs and names of all doctypes attached to this collection:
res_doctype_children = get_doctype_children_of_collection(collection_id)
## for each child, add its details to the list of doctype children for this node:
for doctype in res_doctype_children:
doctype_node = { 'doctype_id' : doctype[0],
'doctype_lname' : doctype[1],
'catalogue_order' : doctype[2],
}
collection_node['doctype_children'].append(doctype_node)
## now get details of all collections attached to this one:
res_collection_children = get_collection_children_of_collection(collection_id)
num_collection_children = len(res_collection_children)
for child_num in xrange(0, num_collection_children):
brother_below = brother_above = 0
if child_num > 0:
## this is not the first brother - it has a brother above
brother_above = 1
if child_num < num_collection_children - 1:
## this is not the last brother - it has a brother below
brother_below = 1
collection_node['collection_children'].append(\
build_submission_collection_tree(collection_id=res_collection_children[child_num][0],
has_brother_above=brother_above,
has_brother_below=brother_below))
## return the built collection tree:
return collection_node
def _organise_submission_page_display_submission_tree(user_msg=""):
title = "Organise WebSubmit Main Page"
body = ""
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
## Get the submissions tree:
submission_collection_tree = build_submission_collection_tree(0)
## Get all 'submission collections':
submission_collections = get_details_of_all_submission_collections()
sub_col = [('0', 'Top Level')]
for collection in submission_collections:
sub_col.append((str(collection[0]), str(collection[1])))
## Get all document types:
doctypes = get_docid_docname_and_docid_alldoctypes()
## build the page:
body = websubmitadmin_templates.tmpl_display_submission_page_organisation(submission_collection_tree=submission_collection_tree,
submission_collections=sub_col,
doctypes=doctypes,
user_msg=user_msg)
return (title, body)
def _delete_submission_collection(sbmcolid):
"""Recursively calls itself to delete a submission-collection and all of its
attached children (and their children, etc) from the submission-tree.
@param sbmcolid: (integer) - the ID of the submission-collection to be deleted.
@return: None
@Exceptions raised: InvenioWebSubmitAdminWarningDeleteFailed when it was not
possible to delete the submission-collection or some of its children.
"""
## Get the collection-children of this submission-collection:
collection_children = get_collection_children_of_collection(sbmcolid)
## recursively move through each collection-child:
for collection_child in collection_children:
_delete_submission_collection(collection_child[0])
## delete all document-types attached to this submission-collection:
error_code = delete_doctype_children_from_submission_collection(sbmcolid)
if error_code != 0:
## Unable to delete all doctype-children:
err_msg = "Unable to delete doctype children of submission-collection [%s]" % sbmcolid
raise InvenioWebSubmitAdminWarningDeleteFailed(err_msg)
## delete this submission-collection's entry from the sbmCOLLECTION_sbmCOLLECTION table:
error_code = delete_submission_collection_from_submission_tree(sbmcolid)
if error_code != 0:
## Unable to delete submission-collection from the submission-tree:
err_msg = "Unable to delete submission-collection [%s] from submission-tree" % sbmcolid
raise InvenioWebSubmitAdminWarningDeleteFailed(err_msg)
## Now delete this submission-collection's details:
error_code = delete_submission_collection_details(sbmcolid)
if error_code != 0:
## Unable to delete the details of the submission-collection:
err_msg = "Unable to delete details of submission-collection [%s]" % sbmcolid
raise InvenioWebSubmitAdminWarningDeleteFailed(err_msg)
## return
return
def perform_request_organise_submission_page(doctype="",
sbmcolid="",
catscore="",
addsbmcollection="",
deletesbmcollection="",
addtosbmcollection="",
adddoctypes="",
movesbmcollectionup="",
movesbmcollectiondown="",
deletedoctypefromsbmcollection="",
movedoctypeupinsbmcollection="",
movedoctypedowninsbmcollection=""):
user_msg = []
body = ""
if "" not in (deletedoctypefromsbmcollection, sbmcolid, catscore, doctype):
## delete a document type from it's position in the tree
error_code = delete_doctype_from_position_on_submission_page(doctype, sbmcolid, catscore)
if error_code == 0:
## doctype deleted - now normalize scores of remaining doctypes:
normalize_scores_of_doctype_children_for_submission_collection(sbmcolid)
user_msg.append("Document type successfully deleted from submissions tree")
else:
user_msg.append("Unable to delete document type from submission-collection")
## display submission-collections:
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (deletesbmcollection, sbmcolid):
## try to delete the submission-collection from the tree:
try:
_delete_submission_collection(sbmcolid)
user_msg.append("Submission-collection successfully deleted from submissions tree")
except InvenioWebSubmitAdminWarningDeleteFailed, excptn:
user_msg.append(str(excptn))
## re-display submission-collections:
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movedoctypedowninsbmcollection, sbmcolid, doctype, catscore):
## move a doctype down in order for a submission-collection:
## normalize scores of all doctype-children of the submission-collection:
normalize_scores_of_doctype_children_for_submission_collection(sbmcolid)
## swap this doctype with that below it:
## Get score of doctype to move:
score_doctype_to_move = get_catalogue_score_of_doctype_child_of_submission_collection(sbmcolid, doctype)
## Get score of the doctype brother directly below the doctype to be moved:
score_brother_below = get_score_of_next_doctype_child_below(sbmcolid, score_doctype_to_move)
if None in (score_doctype_to_move, score_brother_below):
user_msg.append("Unable to move document type down")
else:
## update the brother below the doctype to be moved to have a score the same as the doctype to be moved:
update_score_of_doctype_child_of_submission_collection_at_scorex(sbmcolid, score_brother_below, score_doctype_to_move)
## Update the doctype to be moved to have a score of the brother directly below it:
update_score_of_doctype_child_of_submission_collection_with_doctypeid_and_scorex(sbmcolid,
doctype,
score_doctype_to_move,
score_brother_below)
user_msg.append("Document type moved down")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movedoctypeupinsbmcollection, sbmcolid, doctype, catscore):
## move a doctype up in order for a submission-collection:
## normalize scores of all doctype-children of the submission-collection:
normalize_scores_of_doctype_children_for_submission_collection(sbmcolid)
## swap this doctype with that above it:
## Get score of doctype to move:
score_doctype_to_move = get_catalogue_score_of_doctype_child_of_submission_collection(sbmcolid, doctype)
## Get score of the doctype brother directly above the doctype to be moved:
score_brother_above = get_score_of_previous_doctype_child_above(sbmcolid, score_doctype_to_move)
if None in (score_doctype_to_move, score_brother_above):
user_msg.append("Unable to move document type up")
else:
## update the brother above the doctype to be moved to have a score the same as the doctype to be moved:
update_score_of_doctype_child_of_submission_collection_at_scorex(sbmcolid, score_brother_above, score_doctype_to_move)
## Update the doctype to be moved to have a score of the brother directly above it:
update_score_of_doctype_child_of_submission_collection_with_doctypeid_and_scorex(sbmcolid,
doctype,
score_doctype_to_move,
score_brother_above)
user_msg.append("Document type moved up")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movesbmcollectiondown, sbmcolid):
## move a submission-collection down in order:
## Sanity checking:
try:
int(sbmcolid)
except ValueError:
sbmcolid = 0
if int(sbmcolid) != 0:
## Get father ID of submission-collection:
sbmcolidfather = get_id_father_of_collection(sbmcolid)
if sbmcolidfather is None:
user_msg.append("Unable to move submission-collection downwards")
else:
## normalize scores of all collection-children of the father submission-collection:
normalize_scores_of_collection_children_of_collection(sbmcolidfather)
## swap this collection with the one above it:
## get the score of the collection to move:
score_col_to_move = get_score_of_collection_child_of_submission_collection(sbmcolidfather, sbmcolid)
## get the score of the collection brother directly below the collection to be moved:
score_brother_below = get_score_of_next_collection_child_below(sbmcolidfather, score_col_to_move)
if None in (score_col_to_move, score_brother_below):
## Invalid movement
user_msg.append("Unable to move submission collection downwards")
else:
## update the brother below the collection to be moved to have a score the same as the collection to be moved:
update_score_of_collection_child_of_submission_collection_at_scorex(sbmcolidfather,
score_brother_below,
score_col_to_move)
## Update the collection to be moved to have a score of the brother directly below it:
update_score_of_collection_child_of_submission_collection_with_colid_and_scorex(sbmcolidfather,
sbmcolid,
score_col_to_move,
score_brother_below)
user_msg.append("Submission-collection moved downwards")
else:
## cannot move the master (0) collection
user_msg.append("Unable to move submission-collection downwards")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (movesbmcollectionup, sbmcolid):
## move a submission-collection up in order:
## Sanity checking:
try:
int(sbmcolid)
except ValueError:
sbmcolid = 0
if int(sbmcolid) != 0:
## Get father ID of submission-collection:
sbmcolidfather = get_id_father_of_collection(sbmcolid)
if sbmcolidfather is None:
user_msg.append("Unable to move submission-collection upwards")
else:
## normalize scores of all collection-children of the father submission-collection:
normalize_scores_of_collection_children_of_collection(sbmcolidfather)
## swap this collection with the one above it:
## get the score of the collection to move:
score_col_to_move = get_score_of_collection_child_of_submission_collection(sbmcolidfather, sbmcolid)
## get the score of the collection brother directly above the collection to be moved:
score_brother_above = get_score_of_previous_collection_child_above(sbmcolidfather, score_col_to_move)
if None in (score_col_to_move, score_brother_above):
## Invalid movement
user_msg.append("Unable to move submission collection upwards")
else:
## update the brother above the collection to be moved to have a score the same as the collection to be moved:
update_score_of_collection_child_of_submission_collection_at_scorex(sbmcolidfather,
score_brother_above,
score_col_to_move)
## Update the collection to be moved to have a score of the brother directly above it:
update_score_of_collection_child_of_submission_collection_with_colid_and_scorex(sbmcolidfather,
sbmcolid,
score_col_to_move,
score_brother_above)
user_msg.append("Submission-collection moved upwards")
else:
## cannot move the master (0) collection
user_msg.append("Unable to move submission-collection upwards")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (addsbmcollection, addtosbmcollection):
## Add a submission-collection, attached to a submission-collection:
## check that the collection to attach to exists:
parent_ok = 0
if int(addtosbmcollection) != 0:
parent_name = get_collection_name(addtosbmcollection)
if parent_name is not None:
parent_ok = 1
else:
parent_ok = 1
if parent_ok != 0:
## create the new collection:
id_son = insert_submission_collection(addsbmcollection)
## get the maximum catalogue score of the existing collection children:
max_child_score = \
get_maximum_catalogue_score_of_collection_children_of_submission_collection(addtosbmcollection)
## add it to the collection, at a higher score than the others have:
new_score = max_child_score + 1
insert_collection_child_for_submission_collection(addtosbmcollection, id_son, new_score)
user_msg.append("Submission-collection added to submissions tree")
else:
## Parent submission-collection does not exist:
user_msg.append("Unable to add submission-collection - parent unknown")
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
elif "" not in (adddoctypes, addtosbmcollection):
## Add document type(s) to a submission-collection:
if type(adddoctypes) == str:
adddoctypes = [adddoctypes,]
## Does submission-collection exist?
num_collections_sbmcolid = get_number_of_rows_for_submission_collection(addtosbmcollection)
if num_collections_sbmcolid > 0:
for doctypeid in adddoctypes:
## Check that Doctype exists:
num_doctypes_doctypeid = get_number_doctypes_docid(doctypeid)
if num_doctypes_doctypeid < 1:
## Cannot connect an unknown doctype:
user_msg.append("Unable to connect unknown document-type [%s] to a submission-collection" \
% doctypeid)
continue
else:
## insert the submission-collection/doctype link:
## get the maximum catalogue score of the existing doctype children:
max_child_score = \
get_maximum_catalogue_score_of_doctype_children_of_submission_collection(addtosbmcollection)
## add it to the new doctype, at a higher score than the others have:
new_score = max_child_score + 1
insert_doctype_child_for_submission_collection(addtosbmcollection, doctypeid, new_score)
user_msg.append("Document-type added to submissions tree")
else:
## submission-collection didn't exist
user_msg.append("The selected submission-collection doesn't seem to exist")
## Check that submission-collection exists:
## insert
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
else:
## default action - display submission-collections:
(title, body) = _organise_submission_page_display_submission_tree(user_msg=user_msg)
return (title, body)
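## Illustrative call sketch (the doctype and collection ids are hypothetical):
## the dispatcher above picks a branch based on which form fields are non-empty, e.g.
##   perform_request_organise_submission_page(doctype="DEMOTEST", sbmcolid="2",
##                                            catscore="1",
##                                            deletedoctypefromsbmcollection="true")
## runs the "delete a document type from the tree" branch and then redisplays
## the submission tree.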
## Functions for adding new catalogue items (actions, functions, checks and elements) to the DB:
def _add_new_action(actid,actname,working_dir,status_text):
"""Insert the details of a new action into the websubmit system database.
@param actid: unique action id (sactname)
@param actname: action name (lactname)
@param working_dir: directory action works from (dir)
@param status_text: text string indicating action status (statustext)
"""
(actid,actname,working_dir,status_text) = (str(actid).upper(),str(actname),str(working_dir),str(status_text))
err_code = insert_action_details(actid,actname,working_dir,status_text)
return err_code
def perform_request_add_function(funcname=None, funcdescr=None, funcaddcommit=""):
user_msg = []
body = ""
title = "Create New WebSubmit Function"
commit_error=0
## wash args:
if funcname is not None:
try:
funcname = wash_single_urlarg(urlarg=funcname, argreqdtype=str, argdefault="", maxstrlen=40, minstrlen=1)
if function_name_is_valid(fname=funcname) == 0:
funcname = ""
except ValueError, e:
funcname = ""
else:
funcname = ""
if funcdescr is not None:
try:
funcdescr = wash_single_urlarg(urlarg=funcdescr, argreqdtype=str, argdefault="")
except ValueError, e:
funcdescr = ""
else:
funcdescr = ""
## process request:
if funcaddcommit != "" and funcaddcommit is not None:
if funcname == "":
funcname = ""
user_msg.append("""Function name is mandatory and must be a string with no more than 40 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters, beginning with a """\
"""letter or underscore""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcdescr=funcdescr, user_msg=user_msg)
return (title, body)
## Add a new function definition - IF it is not already present
err_code = insert_function_details(funcname, funcdescr)
## Handle error code - redisplay form with warning about no DB commit, or display with options
## to edit function:
if err_code == 0:
user_msg.append("""'%s' Function Added to WebSubmit""" % (funcname,))
all_function_parameters = get_distinct_paramname_all_websubmit_function_parameters()
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcname=funcname,
funcdescr=funcdescr,
all_websubmit_func_parameters=all_function_parameters,
perform_act="functionedit",
user_msg=user_msg)
else:
## Could not commit function to WebSubmit DB - redisplay form with function description:
user_msg.append("""Could Not Add '%s' Function to WebSubmit""" % (funcname,))
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcdescr=funcdescr, user_msg=user_msg)
else:
## Display Web form for new function addition:
body = websubmitadmin_templates.tmpl_display_addfunctionform()
return (title, body)
def perform_request_add_action(actid=None, actname=None, working_dir=None, status_text=None, actcommit=""):
"""An interface for the addition of a new WebSubmit action.
If form fields filled, will insert new action into WebSubmit database, else will display
web form prompting for action details.
@param actid: unique id for new action
@param actname: name of new action
@param working_dir: action working directory for WebSubmit core
@param status_text: status text displayed at end of action
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Create New WebSubmit Action"
commit_error=0
## wash args:
if actid is not None:
try:
actid = wash_single_urlarg(urlarg=actid, argreqdtype=str, argdefault="", maxstrlen=3, minstrlen=3)
if string_is_alphanumeric_including_underscore(txtstring=actid) == 0:
actid = ""
except ValueError, e:
actid = ""
else:
actid = ""
if actname is not None:
try:
actname = wash_single_urlarg(urlarg=actname, argreqdtype=str, argdefault="")
except ValueError, e:
actname = ""
else:
actname = ""
if working_dir is not None:
try:
working_dir = wash_single_urlarg(urlarg=working_dir, argreqdtype=str, argdefault="")
except ValueError, e:
working_dir = ""
else:
working_dir = ""
if status_text is not None:
try:
status_text = wash_single_urlarg(urlarg=status_text, argreqdtype=str, argdefault="")
except ValueError, e:
status_text = ""
else:
status_text = ""
## process request:
if actcommit != "" and actcommit is not None:
if actid in ("", None):
actid = ""
user_msg.append("""Action ID is mandatory and must be a 3 letter string""")
commit_error = 1
if actname in ("", None):
actname = ""
user_msg.append("""Action description is mandatory""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addactionform(actid=actid, actname=actname, working_dir=working_dir,\
status_text=status_text, user_msg=user_msg)
return (title, body)
## Commit new action to WebSubmit DB:
err_code = _add_new_action(actid,actname,working_dir,status_text)
## Handle error code - redisplay form with warning about no DB commit, or move to list
## of actions
if err_code == 0:
## Action added: show page listing WebSubmit actions
user_msg = """'%s' Action Added to WebSubmit""" % (actid,)
all_actions = get_actid_actname_allactions()
body = websubmitadmin_templates.tmpl_display_allactions(all_actions,user_msg=user_msg)
title = "Available WebSubmit Actions"
else:
## Could not commit action to WebSubmit DB - redisplay form with completed details and error message
## warnings.append(('ERR_WEBSUBMIT_ADMIN_ADDACTIONFAILDUPLICATE',actid) ## TODO
user_msg = """Could Not Add '%s' Action to WebSubmit""" % (actid,)
body = websubmitadmin_templates.tmpl_display_addactionform(actid=actid, actname=actname, working_dir=working_dir, \
status_text=status_text, user_msg=user_msg)
else:
## Display Web form for new action details:
body = websubmitadmin_templates.tmpl_display_addactionform()
return (title, body)
def perform_request_add_jscheck(chname=None, chdesc=None, chcommit=""):
"""An interface for the addition of a new WebSubmit JavaScript Check, as used on form elements.
If form fields filled, will insert new Check into WebSubmit database, else will display
Web form prompting for Check details.
@param chname: unique id/name for new Check
@param chdesc: description (JavaScript code body) of new Check
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Create New WebSubmit Checking Function"
commit_error=0
## wash args:
if chname is not None:
try:
chname = wash_single_urlarg(urlarg=chname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if function_name_is_valid(fname=chname) == 0:
chname = ""
except ValueError, e:
chname = ""
else:
chname = ""
if chdesc is not None:
try:
chdesc = wash_single_urlarg(urlarg=chdesc, argreqdtype=str, argdefault="")
except ValueError, e:
chdesc = ""
else:
chdesc = ""
## process request:
if chcommit != "" and chcommit is not None:
if chname in ("", None):
chname = ""
user_msg.append("""Check name is mandatory and must be a string with no more than 15 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters, beginning with a """\
"""letter or underscore""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=chname, chdesc=chdesc, user_msg=user_msg)
return (title, body)
## Commit new check to WebSubmit DB:
err_code = insert_jscheck_details(chname, chdesc)
## Handle error code - redisplay form with warning about no DB commit, or move to list
## of checks
if err_code == 0:
## Check added: show page listing WebSubmit JS Checks
user_msg.append("""'%s' Checking Function Added to WebSubmit""" % (chname,))
all_jschecks = get_chname_alljschecks()
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
title = "Available WebSubmit Checking Functions"
else:
## Could not commit Check to WebSubmit DB: redisplay form with completed details and error message
## TODO : Warning Message
user_msg.append("""Could Not Add '%s' Checking Function to WebSubmit""" % (chname,))
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=chname, chdesc=chdesc, user_msg=user_msg)
else:
## Display Web form for new check details:
body = websubmitadmin_templates.tmpl_display_addjscheckform()
return (title, body)
def perform_request_add_element(elname=None, elmarccode=None, eltype=None, elsize=None, elrows=None, \
elcols=None, elmaxlength=None, elval=None, elfidesc=None, \
elmodifytext=None, elcommit=""):
"""An interface for adding a new ELEMENT to the WebSubmit DB.
@param elname: (string) element name.
@param elmarccode: (string) element's MARC code.
@param eltype: (character) element type.
@param elsize: (integer) element size.
@param elrows: (integer) number of rows in element.
@param elcols: (integer) number of columns in element.
@param elmaxlength: (integer) maximum length of element
@param elval: (string) default value of element
@param elfidesc: (string) description of element
@param elmodifytext: (string) modification text of element
@param elcommit: (string) If this value is not empty, attempt to commit element details to WebSubmit DB
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Create New WebSubmit Element"
commit_error=0
## wash args:
if elname is not None:
try:
elname = wash_single_urlarg(urlarg=elname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=elname) == 0:
elname = ""
except ValueError, e:
elname = ""
else:
elname = ""
if elmarccode is not None:
try:
elmarccode = wash_single_urlarg(urlarg=elmarccode, argreqdtype=str, argdefault="")
except ValueError, e:
elmarccode = ""
else:
elmarccode = ""
if eltype is not None:
try:
eltype = wash_single_urlarg(urlarg=eltype, argreqdtype=str, argdefault="", maxstrlen=1, minstrlen=1)
except ValueError, e:
eltype = ""
else:
eltype = ""
if elsize is not None:
try:
elsize = wash_single_urlarg(urlarg=elsize, argreqdtype=int, argdefault="")
except ValueError, e:
elsize = ""
else:
elsize = ""
if elrows is not None:
try:
elrows = wash_single_urlarg(urlarg=elrows, argreqdtype=int, argdefault="")
except ValueError, e:
elrows = ""
else:
elrows = ""
if elcols is not None:
try:
elcols = wash_single_urlarg(urlarg=elcols, argreqdtype=int, argdefault="")
except ValueError, e:
elcols = ""
else:
elcols = ""
if elmaxlength is not None:
try:
elmaxlength = wash_single_urlarg(urlarg=elmaxlength, argreqdtype=int, argdefault="")
except ValueError, e:
elmaxlength = ""
else:
elmaxlength = ""
if elval is not None:
try:
elval = wash_single_urlarg(urlarg=elval, argreqdtype=str, argdefault="")
except ValueError, e:
elval = ""
else:
elval = ""
if elfidesc is not None:
try:
elfidesc = wash_single_urlarg(urlarg=elfidesc, argreqdtype=str, argdefault="")
except ValueError, e:
elfidesc = ""
else:
elfidesc = ""
if elmodifytext is not None:
try:
elmodifytext = wash_single_urlarg(urlarg=elmodifytext, argreqdtype=str, argdefault="")
except ValueError, e:
elmodifytext = ""
else:
elmodifytext = ""
## process request:
if elcommit != "" and elcommit is not None:
if elname == "":
elname = ""
user_msg.append("""The element name is mandatory and must be a string with no more than 15 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters""")
commit_error = 1
if eltype == "" or eltype not in ("D", "F", "H", "I", "R", "S", "T"):
eltype = ""
user_msg.append("""The element type is mandatory and must be selected from the list""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=str(elsize),
elrows=str(elrows),
elcols=str(elcols),
elmaxlength=str(elmaxlength),
elval=elval,
elfidesc=elfidesc,
elmodifytext=elmodifytext,
user_msg=user_msg,
)
return (title, body)
## Commit new element description to WebSubmit DB:
err_code = insert_element_details(elname=elname, elmarccode=elmarccode, eltype=eltype, \
elsize=elsize, elrows=elrows, elcols=elcols, \
elmaxlength=elmaxlength, elval=elval, elfidesc=elfidesc, \
elmodifytext=elmodifytext)
if err_code == 0:
## Element added: show page listing WebSubmit elements
user_msg.append("""'%s' Element Added to WebSubmit""" % (elname,))
if elname in CFG_RESERVED_SUBMISSION_FILENAMES:
user_msg.append("""WARNING: '%s' is a reserved name. Check WebSubmit admin guide to be aware of possible side-effects.""" % elname)
title = "Available WebSubmit Elements"
all_elements = get_elename_allelements()
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
else:
## Could not commit element to WebSubmit DB: redisplay form with completed details and error message
## TODO : Warning Message
user_msg.append("""Could Not Add '%s' Element to WebSubmit""" % (elname,))
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=str(elsize),
elrows=str(elrows),
elcols=str(elcols),
elmaxlength=str(elmaxlength),
elval=elval,
elfidesc=elfidesc,
elmodifytext=elmodifytext,
user_msg=user_msg,
)
else:
## Display Web form for new element details:
body = websubmitadmin_templates.tmpl_display_addelementform()
return (title, body)
def perform_request_edit_element(elname, elmarccode=None, eltype=None, elsize=None, \
elrows=None, elcols=None, elmaxlength=None, elval=None, \
elfidesc=None, elmodifytext=None, elcommit=""):
"""An interface for the editing and updating the details of a WebSubmit ELEMENT.
@param elname: element name.
@param elmarccode: element's MARC code.
@param eltype: element type.
@param elsize: element size.
@param elrows: number of rows in element.
@param elcols: number of columns in element.
@param elmaxlength: maximum length of element
@param elval: default value of element
@param elfidesc: description of element
@param elmodifytext: modification text of element
@param elcommit: If this value is not empty, attempt to commit element details to WebSubmit DB
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Edit WebSubmit Element"
commit_error=0
## wash args:
if elname is not None:
try:
elname = wash_single_urlarg(urlarg=elname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=elname) == 0:
elname = ""
except ValueError, e:
elname = ""
else:
elname = ""
if elmarccode is not None:
try:
elmarccode = wash_single_urlarg(urlarg=elmarccode, argreqdtype=str, argdefault="")
except ValueError, e:
elmarccode = ""
else:
elmarccode = ""
if eltype is not None:
try:
eltype = wash_single_urlarg(urlarg=eltype, argreqdtype=str, argdefault="", maxstrlen=1, minstrlen=1)
except ValueError, e:
eltype = ""
else:
eltype = ""
if elsize is not None:
try:
elsize = wash_single_urlarg(urlarg=elsize, argreqdtype=int, argdefault="")
except ValueError, e:
elsize = ""
else:
elsize = ""
if elrows is not None:
try:
elrows = wash_single_urlarg(urlarg=elrows, argreqdtype=int, argdefault="")
except ValueError, e:
elrows = ""
else:
elrows = ""
if elcols is not None:
try:
elcols = wash_single_urlarg(urlarg=elcols, argreqdtype=int, argdefault="")
except ValueError, e:
elcols = ""
else:
elcols = ""
if elmaxlength is not None:
try:
elmaxlength = wash_single_urlarg(urlarg=elmaxlength, argreqdtype=int, argdefault="")
except ValueError, e:
elmaxlength = ""
else:
elmaxlength = ""
if elval is not None:
try:
elval = wash_single_urlarg(urlarg=elval, argreqdtype=str, argdefault="")
except ValueError, e:
elval = ""
else:
elval = ""
if elfidesc is not None:
try:
elfidesc = wash_single_urlarg(urlarg=elfidesc, argreqdtype=str, argdefault="")
except ValueError, e:
elfidesc = ""
else:
elfidesc = ""
if elmodifytext is not None:
try:
elmodifytext = wash_single_urlarg(urlarg=elmodifytext, argreqdtype=str, argdefault="")
except ValueError, e:
elmodifytext = ""
else:
elmodifytext = ""
## process request:
if elcommit != "" and elcommit is not None:
if elname == "":
elname = ""
user_msg.append("""Invalid Element Name!""")
commit_error = 1
if eltype == "" or eltype not in ("D", "F", "H", "I", "R", "S", "T"):
eltype = ""
user_msg.append("""Invalid Element Type!""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
all_elements = get_elename_allelements()
user_msg.append("""Could Not Update Element""")
title = "Available WebSubmit Elements"
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
return (title, body)
## Commit updated element description to WebSubmit DB:
err_code = update_element_details(elname=elname, elmarccode=elmarccode, eltype=eltype, \
elsize=elsize, elrows=elrows, elcols=elcols, \
elmaxlength=elmaxlength, elval=elval, elfidesc=elfidesc, \
elmodifytext=elmodifytext)
if err_code == 0:
## Element Updated: Show All Element Details Again
user_msg.append("""'%s' Element Updated""" % (elname,))
## Get submission page usage of element:
el_use = get_doctype_action_pagenb_for_submissions_using_element(elname)
element_dets = get_element_details(elname)
element_dets = stringify_listvars(element_dets)
## Take elements from results tuple:
(elmarccode, eltype, elsize, elrows, elcols, elmaxlength, \
elval, elfidesc, elcd, elmd, elmodifytext) = \
(element_dets[0][0], element_dets[0][1], element_dets[0][2], element_dets[0][3], \
element_dets[0][4], element_dets[0][5], element_dets[0][6], element_dets[0][7], \
element_dets[0][8], element_dets[0][9], element_dets[0][10])
## Pass to template:
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=elsize,
elrows=elrows,
elcols=elcols,
elmaxlength=elmaxlength,
elval=elval,
elfidesc=elfidesc,
elcd=elcd,
elmd=elmd,
elmodifytext=elmodifytext,
perform_act="elementedit",
user_msg=user_msg,
el_use_tuple=el_use
)
else:
## Could Not Update Element: Maybe Key Violation, or Invalid elname? Redisplay all elements.
## TODO : LOGGING
all_elements = get_elename_allelements()
user_msg.append("""Could Not Update Element '%s'""" % (elname,))
title = "Available WebSubmit Elements"
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
else:
## Display Web form containing existing details of element:
element_dets = get_element_details(elname)
## Get submission page usage of element:
el_use = get_doctype_action_pagenb_for_submissions_using_element(elname)
num_rows_ret = len(element_dets)
element_dets = stringify_listvars(element_dets)
if num_rows_ret == 1:
## Display Element details
## Take elements from results tuple:
(elmarccode, eltype, elsize, elrows, elcols, elmaxlength, \
elval, elfidesc, elcd, elmd, elmodifytext) = \
(element_dets[0][0], element_dets[0][1], element_dets[0][2], element_dets[0][3], \
element_dets[0][4], element_dets[0][5], element_dets[0][6], element_dets[0][7], \
element_dets[0][8], element_dets[0][9], element_dets[0][10])
## Pass to template:
body = websubmitadmin_templates.tmpl_display_addelementform(elname=elname,
elmarccode=elmarccode,
eltype=eltype,
elsize=elsize,
elrows=elrows,
elcols=elcols,
elmaxlength=elmaxlength,
elval=elval,
elfidesc=elfidesc,
elcd=elcd,
elmd=elmd,
elmodifytext=elmodifytext,
perform_act="elementedit",
el_use_tuple=el_use
)
else:
## Either no rows, or more than one row for ELEMENT: log error, and display all Elements
## TODO : LOGGING
title = "Available WebSubmit Elements"
all_elements = get_elename_allelements()
if num_rows_ret > 1:
## Key Error - duplicated elname
user_msg.append("""Found Several Rows for Element with Name '%s' - Inform Administrator""" % (elname,))
## LOG MESSAGE
else:
## No rows for ELEMENT
user_msg.append("""Could Not Find Any Rows for Element with Name '%s'""" % (elname,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_allelements(all_elements, user_msg=user_msg)
return (title, body)
def _display_edit_check_form(chname, user_msg=""):
title = "Edit WebSubmit Checking Function"
if user_msg == "":
user_msg = []
jscheck_dets = get_jscheck_details(chname)
num_rows_ret = len(jscheck_dets)
if num_rows_ret == 1:
## Display Check details
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=jscheck_dets[0][0],
chdesc=jscheck_dets[0][1],
perform_act="jscheckedit",
cd=jscheck_dets[0][2],
md=jscheck_dets[0][3],
user_msg=user_msg)
else:
## Either no rows, or more than one row for Check: log error, and display all Checks
## TODO : LOGGING
title = "Available WebSubmit Checking Functions"
all_jschecks = get_chname_alljschecks()
if num_rows_ret > 1:
## Key Error - duplicated chname
user_msg.append("""Found Several Rows for Checking Function with Name '%s' - Inform Administrator""" % (chname,))
## LOG MESSAGE
else:
## No rows for action
user_msg.append("""Could Not Find Any Rows for Checking Function with Name '%s'""" % (chname,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
return (title, body)
def perform_request_edit_jscheck(chname, chdesc=None, chcommit=""):
"""Interface for editing and updating the details of a WebSubmit Check.
If only "chname" provided, will display the details of a Check in a Web form.
If "chdesc" not empty, will assume that this is a call to commit update to Check details.
@param chname: unique id for Check
@param chdesc: modified value for WebSubmit Check description (code body) - (presence invokes update)
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Edit WebSubmit Checking Function"
commit_error=0
## wash args:
if chname is not None:
try:
chname = wash_single_urlarg(urlarg=chname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if function_name_is_valid(fname=chname) == 0:
chname = ""
except ValueError, e:
chname = ""
else:
chname = ""
if chdesc is not None:
try:
chdesc = wash_single_urlarg(urlarg=chdesc, argreqdtype=str, argdefault="")
except ValueError, e:
chdesc = ""
else:
chdesc = ""
(chname, chdesc) = (str(chname), str(chdesc))
if chcommit != "" and chcommit is not None:
if chname in ("", None):
chname = ""
user_msg.append("""Check name is mandatory and must be a string with no more than 15 characters""")
user_msg.append("""It must contain only alpha-numeric and underscore characters, beginning with a """\
"""letter or underscore""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
all_jschecks = get_chname_alljschecks()
user_msg.append("""Could Not Update Checking Function""")
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
title = "Available WebSubmit Checking Functions"
return (title, body)
## Commit updated Check details to WebSubmit DB:
err_code = update_jscheck_details(chname, chdesc)
if err_code == 0:
## Check Updated: Show All Check Details Again
user_msg.append("""'%s' Check Updated""" % (chname,))
jscheck_dets = get_jscheck_details(chname)
body = websubmitadmin_templates.tmpl_display_addjscheckform(chname=jscheck_dets[0][0],
chdesc=jscheck_dets[0][1],
perform_act="jscheckedit",
cd=jscheck_dets[0][2],
md=jscheck_dets[0][3],
user_msg=user_msg
)
else:
## Could Not Update Check: Maybe Key Violation, or Invalid chname? Redisplay all Checks.
## TODO : LOGGING
all_jschecks = get_chname_alljschecks()
user_msg.append("""Could Not Update Checking Function '%s'""" % (chname,))
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks, user_msg=user_msg)
title = "Available WebSubmit Checking Functions"
else:
## Display Web form containing existing details of Check:
(title, body) = _display_edit_check_form(chname=chname)
return (title, body)
def _display_edit_action_form(actid, user_msg=""):
title = "Edit WebSubmit Action"
if user_msg == "":
user_msg = []
action_dets = get_action_details(actid)
num_rows_ret = len(action_dets)
if num_rows_ret == 1:
## Display action details
body = websubmitadmin_templates.tmpl_display_addactionform(actid=action_dets[0][0],
actname=action_dets[0][1],
working_dir=action_dets[0][2],
status_text=action_dets[0][3],
perform_act="actionedit",
cd=action_dets[0][4],
md=action_dets[0][5],
user_msg=user_msg)
else:
## Either no rows, or more than one row for action: log error, and display all actions
## TODO : LOGGING
title = "Available WebSubmit Actions"
all_actions = get_actid_actname_allactions()
if num_rows_ret > 1:
## Key Error - duplicated actid
user_msg.append("""Found Several Rows for Action with ID '%s' - Inform Administrator""" % (actid,))
## LOG MESSAGE
else:
## No rows for action
user_msg.append("""Could Not Find Any Rows for Action with ID '%s'""" % (actid,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_allactions(all_actions, user_msg=user_msg)
return (title, body)
def perform_request_edit_action(actid, actname=None, working_dir=None, status_text=None, actcommit=""):
"""Interface for editing and updating the details of a WebSubmit action.
If only "actid" provided, will display the details of an action in a Web form.
If "actname" not empty, will assume that this is a call to commit update to action details.
@param actid: unique id for action
@param actname: modified value for WebSubmit action name/description (presence invokes update)
@param working_dir: modified value for WebSubmit action working_dir
@param status_text: modified value for WebSubmit action status text
@return: tuple containing "title" (title of page), body (page body).
"""
user_msg = []
body = ""
title = "Edit WebSubmit Action"
commit_error = 0
## wash args:
if actid is not None:
try:
actid = wash_single_urlarg(urlarg=actid, argreqdtype=str, argdefault="", maxstrlen=3, minstrlen=3)
if string_is_alphanumeric_including_underscore(txtstring=actid) == 0:
actid = ""
except ValueError, e:
actid = ""
actid = actid.upper()
else:
actid = ""
if actname is not None:
try:
actname = wash_single_urlarg(urlarg=actname, argreqdtype=str, argdefault="")
except ValueError, e:
actname = ""
else:
actname = ""
if working_dir is not None:
try:
working_dir = wash_single_urlarg(urlarg=working_dir, argreqdtype=str, argdefault="")
except ValueError, e:
working_dir = ""
else:
working_dir = ""
if status_text is not None:
try:
status_text = wash_single_urlarg(urlarg=status_text, argreqdtype=str, argdefault="")
except ValueError, e:
status_text = ""
else:
status_text = ""
## process request:
if actcommit != "" and actcommit is not None:
if actname in ("", None):
actname = ""
user_msg.append("""Action description is mandatory""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
(title, body) = _display_edit_action_form(actid=actid, user_msg=user_msg)
return (title, body)
## Commit updated action details to WebSubmit DB:
err_code = update_action_details(actid, actname, working_dir, status_text)
if err_code == 0:
## Action Updated: Show Action Details Again
user_msg.append("""'%s' Action Updated""" % (actid,))
action_dets = get_action_details(actid)
body = websubmitadmin_templates.tmpl_display_addactionform(actid=action_dets[0][0],
actname=action_dets[0][1],
working_dir=action_dets[0][2],
status_text=action_dets[0][3],
perform_act="actionedit",
cd=action_dets[0][4],
md=action_dets[0][5],
user_msg=user_msg
)
else:
## Could Not Update Action: Maybe Key Violation, or Invalid actid? Redisplay all actions.
## TODO : LOGGING
all_actions = get_actid_actname_allactions()
user_msg.append("""Could Not Update Action '%s'""" % (actid,))
body = websubmitadmin_templates.tmpl_display_allactions(all_actions, user_msg=user_msg)
title = "Available WebSubmit Actions"
else:
## Display Web form containing existing details of action:
(title, body) = _display_edit_action_form(actid=actid)
return (title, body)
def _functionedit_display_function_details(funcname, user_msg=""):
"""Display the details of a function, along with any message to the user that may have been provided.
@param funcname: unique name of function to be updated
@param user_msg: Any message to the user that is to be displayed on the page.
@return: tuple containing (page title, HTML page body).
"""
if user_msg == "":
user_msg = []
title = "Edit WebSubmit Function"
func_descr_res = get_function_description(function=funcname)
num_rows_ret = len(func_descr_res)
if num_rows_ret == 1:
## Display action details
funcdescr = func_descr_res[0][0]
if funcdescr is None:
funcdescr = ""
## get parameters for this function:
this_function_parameters = get_function_parameters(function=funcname)
## get all function parameters in WebSubmit:
all_function_parameters = get_distinct_paramname_all_websubmit_function_parameters()
## get the docstring of the function. Remove leading empty
## lines and remove unnecessary leading whitespaces
docstring = None
try:
websubmit_function = __import__('invenio.websubmit_functions.%s' % funcname,
globals(), locals(), [funcname])
if hasattr(websubmit_function, funcname) and getattr(websubmit_function, funcname).__doc__:
docstring = getattr(websubmit_function, funcname).__doc__
except Exception, e:
docstring = '''<span style="color:#f00;font-weight:700">Function documentation could
not be loaded</span>.<br/>Please check function definition. Error was:<br/>%s''' % str(e)
if docstring:
docstring = '<pre style="max-height:500px;overflow: auto;">' + _format_function_docstring(docstring) + '</pre>'
body = websubmitadmin_templates.tmpl_display_addfunctionform(funcname=funcname,
funcdescr=funcdescr,
func_parameters=this_function_parameters,
all_websubmit_func_parameters=all_function_parameters,
perform_act="functionedit",
user_msg=user_msg,
func_docstring = docstring
)
else:
## Either no rows, or more than one row for function: log error, and display all functions
## TODO : LOGGING
title = "Available WebSubmit Functions"
all_functions = get_funcname_funcdesc_allfunctions()
if num_rows_ret > 1:
## Key Error - duplicated function name
user_msg.append("""Found Several Rows for Function with Name '%s' - Inform Administrator""" % (funcname,))
## LOG MESSAGE
else:
## No rows for function
user_msg.append("""Could Not Find Any Rows for Function with Name '%s'""" % (funcname,))
## LOG MESSAGE
body = websubmitadmin_templates.tmpl_display_allfunctions(all_functions, user_msg=user_msg)
return (title, body)
def _format_function_docstring(docstring):
"""
Remove unnecessary leading and trailing empty lines, as well as
    meaningless leading and trailing whitespace on every line
@param docstring: the input docstring to format
@type docstring: string
@return: a formatted docstring
@rtype: string
"""
def count_leading_whitespaces(line):
"Count enumber of leading whitespaces"
line_length = len(line)
pos = 0
while pos < line_length and line[pos] == " ":
pos += 1
return pos
new_docstring_list = []
min_nb_leading_whitespace = len(docstring) # this is really the max possible
# First count min number of leading whitespaces of all lines. Also
# remove leading empty lines.
docstring_has_started_p = False
for line in docstring.splitlines():
if docstring_has_started_p or line.strip():
            # A non-empty line has been found, or an empty line after
            # the start of the text has been found
docstring_has_started_p = True
new_docstring_list.append(line)
if line.strip():
# If line has some meaningful char, count leading whitespaces
line_nb_spaces = count_leading_whitespaces(line)
if line_nb_spaces < min_nb_leading_whitespace:
min_nb_leading_whitespace = line_nb_spaces
return '\n'.join([line[min_nb_leading_whitespace:] for line in new_docstring_list]).rstrip()
def _functionedit_update_description(funcname, funcdescr):
"""Perform an update of the description for a given function.
@param funcname: unique name of function to be updated
@param funcdescr: description to be updated for funcname
@return: a tuple containing (page title, HTML body content)
"""
user_msg = []
err_code = update_function_description(funcname, funcdescr)
if err_code == 0:
## Function updated - redisplay
user_msg.append("""'%s' Function Description Updated""" % (funcname,))
else:
## Could not update function description
## TODO : ERROR LIBS
user_msg.append("""Could Not Update Description for Function '%s'""" % (funcname,))
## Display function details
(title, body) = _functionedit_display_function_details(funcname=funcname, user_msg=user_msg)
return (title, body)
def _functionedit_delete_parameter(funcname, deleteparam):
"""Delete a parameter from a given function.
Important: if any document types have been using the function from which this parameter will be deleted,
and therefore have values for this parameter, these values will not be deleted from the WebSubmit DB.
The deleted parameter therefore may continue to exist in the WebSubmit DB, but will be disassociated
from this function.
@param funcname: unique name of the function from which the parameter is to be deleted.
@param deleteparam: the name of the parameter to be deleted from the function.
@return: tuple containing (title, HTML body content)
"""
user_msg = []
err_code = delete_function_parameter(function=funcname, parameter_name=deleteparam)
if err_code == 0:
## Parameter deleted - redisplay function details
user_msg.append("""'%s' Parameter Deleted from '%s' Function""" % (deleteparam, funcname))
else:
## could not delete param - it does not exist for this function
## TODO : ERROR LIBS
user_msg.append("""'%s' Parameter Does not Seem to Exist for Function '%s' - Could not Delete""" \
% (deleteparam, funcname))
## Display function details
(title, body) = _functionedit_display_function_details(funcname=funcname, user_msg=user_msg)
return (title, body)
def _functionedit_add_parameter(funcname, funceditaddparam="", funceditaddparamfree=""):
"""Add (connect) a parameter to a given WebSubmit function.
@param funcname: unique name of the function to which the parameter is to be added.
@param funceditaddparam: the value of a HTML select list: if present, will contain the name of the
parameter to be added to the function. May also be empty - the user may have used the free-text field
(funceditaddparamfree) to manually enter the name of a parameter. The important thing is that one
    must be present for the parameter to be added successfully.
@param funceditaddparamfree: The name of the parameter to be added to the function, as taken from a free-
text HTML input field. May also be empty - the user may have used the HTML select-list (funceditaddparam)
field to choose the parameter. The important thing is that one must be present for the parameter to be
    added successfully. The "funceditaddparamfree" value will take priority over the "funceditaddparam"
list value.
@return: tuple containing (title, HTML body content)
"""
user_msg = []
if funceditaddparam in ("", None, "NO_VALUE") and funceditaddparamfree in ("", None):
## no parameter chosen
## TODO : ERROR LIBS
user_msg.append("""Unable to Find the Parameter to be Added to Function '%s' - Could not Add""" % (funcname,))
else:
add_parameter = ""
if funceditaddparam not in ("", None) and funceditaddparamfree not in ("", None):
## both select box and free-text values provided for parameter - prefer free-text
add_parameter = funceditaddparamfree
elif funceditaddparam not in ("", None):
            ## take the select-box chosen parameter
add_parameter = funceditaddparam
else:
            ## take the free-text chosen parameter
add_parameter = funceditaddparamfree
## attempt to commit parameter:
err_code = add_function_parameter(function=funcname, parameter_name=add_parameter)
if err_code == 0:
## Parameter added - redisplay function details
user_msg.append("""'%s' Parameter Added to '%s' Function""" % (add_parameter, funcname))
else:
## could not add param - perhaps it already exists for this function
## TODO : ERROR LIBS
user_msg.append("""Could not Add '%s' Parameter to Function '%s' - It Already Exists for this Function""" \
% (add_parameter, funcname))
## Display function details
(title, body) = _functionedit_display_function_details(funcname=funcname, user_msg=user_msg)
return (title, body)
def perform_request_edit_function(funcname, funcdescr=None, funceditaddparam=None, funceditaddparamfree=None,
funceditdelparam=None, funcdescreditcommit="", funcparamdelcommit="",
funcparamaddcommit=""):
"""Edit a WebSubmit function. 3 possibilities: edit the function description; delete a parameter from the
function; add a new parameter to the function.
@param funcname: the name of the function to be modified
@param funcdescr: the new function description
@param funceditaddparam: the name of the parameter to be added to the function (taken from HTML SELECT-list)
@param funceditaddparamfree: the name of the parameter to be added to the function (taken from free-text input)
@param funceditdelparam: the name of the parameter to be deleted from the function
@param funcdescreditcommit: a flag to indicate that this request is to update the description of a function
@param funcparamdelcommit: a flag to indicate that this request is to delete a parameter from a function
@param funcparamaddcommit: a flag to indicate that this request is to add a new parameter to a function
@return: tuple containing (page title, HTML page body)
"""
body = ""
title = "Edit WebSubmit Function"
commit_error = 0
## wash args:
if funcname is not None:
try:
funcname = wash_single_urlarg(urlarg=funcname, argreqdtype=str, argdefault="")
if string_is_alphanumeric_including_underscore(txtstring=funcname) == 0:
funcname = ""
except ValueError, e:
funcname = ""
else:
funcname = ""
if funcdescr is not None:
try:
funcdescr = wash_single_urlarg(urlarg=funcdescr, argreqdtype=str, argdefault="")
except ValueError, e:
funcdescr = ""
else:
funcdescr = ""
if funceditaddparam is not None:
try:
funceditaddparam = wash_single_urlarg(urlarg=funceditaddparam, argreqdtype=str, argdefault="")
if string_is_alphanumeric_including_underscore(txtstring=funceditaddparam) == 0:
funceditaddparam = ""
except ValueError, e:
funceditaddparam = ""
else:
funceditaddparam = ""
if funceditaddparamfree is not None:
try:
funceditaddparamfree = wash_single_urlarg(urlarg=funceditaddparamfree, argreqdtype=str, argdefault="")
if string_is_alphanumeric_including_underscore(txtstring=funceditaddparamfree) == 0:
funceditaddparamfree = ""
except ValueError, e:
funceditaddparamfree = ""
else:
funceditaddparamfree = ""
if funceditdelparam is not None:
try:
funceditdelparam = wash_single_urlarg(urlarg=funceditdelparam, argreqdtype=str, argdefault="")
except ValueError, e:
funceditdelparam = ""
else:
funceditdelparam = ""
if funcname == "":
(title, body) = _functionedit_display_function_details(funcname=funcname)
return (title, body)
if funcdescreditcommit != "" and funcdescreditcommit is not None:
        ## Update the description of a function:
(title, body) = _functionedit_update_description(funcname=funcname, funcdescr=funcdescr)
elif funcparamaddcommit != "" and funcparamaddcommit is not None:
## Request to add a new parameter to a function
(title, body) = _functionedit_add_parameter(funcname=funcname,
funceditaddparam=funceditaddparam, funceditaddparamfree=funceditaddparamfree)
elif funcparamdelcommit != "" and funcparamdelcommit is not None:
## Request to delete a parameter from a function
(title, body) = _functionedit_delete_parameter(funcname=funcname, deleteparam=funceditdelparam)
else:
        ## Display the Web form containing the existing details of the function:
(title, body) = _functionedit_display_function_details(funcname=funcname)
return (title, body)
def perform_request_function_usage(funcname):
"""Display a page containing the usage details of a given function.
@param funcname: the function name
@return: page body
"""
func_usage = get_function_usage_details(function=funcname)
func_usage = stringify_listvars(func_usage)
body = websubmitadmin_templates.tmpl_display_function_usage(funcname, func_usage)
return body
def perform_request_list_actions():
"""Display a list of all WebSubmit actions.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_actions = get_actid_actname_allactions()
body = websubmitadmin_templates.tmpl_display_allactions(all_actions)
return body
def perform_request_list_doctypes():
"""Display a list of all WebSubmit document types.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(all_doctypes)
return body
def perform_request_list_jschecks():
"""Display a list of all WebSubmit JavaScript element checking functions.
@return: body, where body is a string of HTML, which is a page body.
"""
body = ""
all_jschecks = get_chname_alljschecks()
body = websubmitadmin_templates.tmpl_display_alljschecks(all_jschecks)
return body
def perform_request_list_functions():
"""Display a list of all WebSubmit FUNCTIONS.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_functions = get_funcname_funcdesc_allfunctions()
body = websubmitadmin_templates.tmpl_display_allfunctions(all_functions)
return body
def perform_request_list_elements():
"""Display a list of all WebSubmit ELEMENTS.
@return: body where body is a string of HTML, which is a page body.
"""
body = ""
all_elements = get_elename_allelements()
body = websubmitadmin_templates.tmpl_display_allelements(all_elements)
return body
def _remove_doctype(doctype):
"""Process removal of a document type.
@param doctype: the document type to be removed.
    @return: a tuple containing 2 strings: (page title, page body)
"""
title = ""
body = ""
user_msg = []
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype == 1:
## Doctype is unique and can therefore be deleted:
## Delete any function parameters for this document type:
error_code = delete_all_parameters_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all parameters - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all function parameter values for document type "%s".""" % (doctype,))
## delete all functions called by this doctype's actions
error_code = delete_all_functions_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all functions - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all functions for document type "%s".""" % (doctype,))
## delete all categories of this doctype
error_code = delete_all_categories_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all categories - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all parameters for document type "%s".""" % (doctype,))
## delete all submission interface fields for this doctype
error_code = delete_all_submissionfields_doctype(doctype=doctype)
if error_code != 0:
## problem deleting some or all submission fields - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all submission fields for document type "%s".""" % (doctype,))
## delete all submissions for this doctype
error_code = delete_all_submissions_doctype(doctype)
if error_code != 0:
## problem deleting some or all submissions - inform user and log error
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete some or all submissions for document type "%s".""" % (doctype,))
## delete entry for this doctype in the collection-doctypes table
error_code = delete_collection_doctype_entry_doctype(doctype)
if error_code != 0:
## problem deleting this doctype from the collection-doctypes table
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete document type "%s" from the collection-doctypes table.""" % (doctype,))
## delete the doctype itself
error_code = delete_doctype(doctype)
if error_code != 0:
## problem deleting this doctype from the doctypes table
## TODO : ERROR LOGGING
user_msg.append("""Unable to delete document type "%s" from the document types table.""" % (doctype,))
user_msg.append("""The "%s" document type should now have been deleted, but you should not ignore any warnings.""" % (doctype,))
title = """Available WebSubmit Document Types"""
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
else:
## doctype is not unique and cannot be deleted
if numrows_doctype > 1:
## doctype is duplicated - cannot delete - needs admin intervention
## TODO : LOG ERROR
user_msg.append("""%s WebSubmit document types have been identified for doctype id "%s" - unable to delete.""" \
""" Please inform administrator.""" % (numrows_doctype, doctype))
else:
## no document types found for this doctype id
## TODO : LOG ERROR
user_msg.append("""Unable to find any document types in the WebSubmit database for doctype id "%s" - unable to delete""" \
% (doctype,))
## get a list of all document types, and once more display the delete form, with the message
alldoctypes = get_docid_docname_and_docid_alldoctypes()
title = "Remove WebSubmit Doctument Type"
body = websubmitadmin_templates.tmpl_display_delete_doctype_form(doctype="", alldoctypes=alldoctypes, user_msg=user_msg)
return (title, body)
def perform_request_remove_doctype(doctype="", doctypedelete="", doctypedeleteconfirm=""):
"""Remove a document type from WebSubmit.
@param doctype: the document type to be removed
    @param doctypedelete: flag to signal that a confirmation for deletion should be displayed
    @param doctypedeleteconfirm: flag to signal that confirmation for deletion has been received and
the doctype should be removed
@return: a tuple (title, body)
"""
body = ""
title = "Remove WebSubmit Document Type"
if doctypedeleteconfirm not in ("", None):
## Delete the document type:
(title, body) = _remove_doctype(doctype=doctype)
else:
## Display "doctype delete form"
if doctypedelete not in ("", None) and doctype not in ("", None):
## don't bother to get list of doctypes - user will be prompted to confirm the deletion of "doctype"
alldoctypes = None
else:
## get list of all doctypes to pass to template so that it can prompt the user to choose a doctype to delete
## alldoctypes = get_docid_docname_alldoctypes()
alldoctypes = get_docid_docname_and_docid_alldoctypes()
body = websubmitadmin_templates.tmpl_display_delete_doctype_form(doctype=doctype, alldoctypes=alldoctypes)
return (title, body)
def _create_add_doctype_form(doctype="", doctypename="", doctypedescr="", clonefrom="", user_msg=""):
"""Perform the steps necessary to create the "add a new doctype" form.
@param doctype: The unique ID that is to be used for the new doctype.
@param doctypename: the name that is to be given to a doctype.
    @param doctypedescr: the description to be allocated to the new doctype.
    @param clonefrom: the ID of an existing document type from which the new doctype may be cloned.
    @param user_msg: any message to be displayed to the user.
@return: a tuple containing page title and HTML body of page: (title, body)
"""
title = """Add New WebSubmit Document Type"""
alldoctypes = get_docid_docname_and_docid_alldoctypes()
body = websubmitadmin_templates.tmpl_display_doctypedetails_form(doctype=doctype,
doctypename=doctypename,
doctypedescr=doctypedescr,
clonefrom=clonefrom,
alldoctypes=alldoctypes,
user_msg=user_msg
)
return (title, body)
def _clone_categories_doctype(user_msg, fromdoctype, todoctype):
"""Clone the categories of one document type, to another document type.
@param user_msg: any message to be displayed to the user (this is a list)
@param fromdoctype: the doctype from which categories are to be cloned
@param todoctype: the doctype into which categories are to be cloned
    @return: integer value (0/1/2) - if the existing categories of todoctype couldn't be deleted, return 1
     (cloning failed); if only some categories could be cloned, return 2 (cloning partially successful);
     if all categories could be cloned, return 0 (cloning successful).
"""
error_code = clone_categories_fromdoctype_todoctype(fromdoctype=fromdoctype, todoctype=todoctype)
if error_code == 1:
## doctype had existing categories and they could not be deleted
## TODO : LOG ERRORS
user_msg.append("""Categories already existed for the document type "%s" but could not be deleted. Unable to clone""" \
""" categories of doctype "%s".""" % (todoctype, fromdoctype))
return 1 ## cloning failed
elif error_code == 2:
## could not clone all categories for new doctype
## TODO : LOG ERRORS
user_msg.append("""Unable to clone all categories from doctype "%s", for doctype "%s".""" % (fromdoctype, todoctype))
return 2 ## cloning at least partially successful
else:
return 0 ## cloning successful
def _clone_functions_foraction_doctype(user_msg, fromdoctype, todoctype, action):
"""Clone the functions of a given action of one document type, to the same action on another document type.
@param user_msg: any message to be displayed to the user (this is a list)
@param fromdoctype: the doctype from which functions are to be cloned
@param todoctype: the doctype into which functions are to be cloned
@param action: the action for which functions are to be cloned
    @return: an integer value (0/1/2). In the case that todoctype had existing functions for the given action and
     they could not be deleted, return 1, signalling that this is a serious problem; in the case that only some
     of the functions could be cloned, return 2; in the case that all functions were cloned, return 0.
"""
error_code = clone_functions_foraction_fromdoctype_todoctype(fromdoctype=fromdoctype, todoctype=todoctype, action=action)
if error_code == 1:
## doctype had existing functions for the given action and they could not be deleted
## TODO : LOG ERRORS
user_msg.append("""Functions already existed for the "%s" action of the document type "%s" but they could not be """ \
"""deleted. Unable to clone the functions of Document Type "%s" for action "%s".""" \
% (action, todoctype, fromdoctype, action))
## critical - return 1 to signal this
return 1
elif error_code == 2:
## could not clone all functions for given action for new doctype
## TODO : LOG ERRORS
user_msg.append("""Unable to clone all functions for the "%s" action from doctype "%s", for doctype "%s".""" \
% (action, fromdoctype, todoctype))
return 2 ## not critical
else:
return 0 ## total success
def _clone_functionparameters_foraction_fromdoctype_todoctype(user_msg, fromdoctype, todoctype, action):
"""Clone the parameters/values of a given action of one document type, to the same action on another document type.
@param user_msg: any message to be displayed to the user (this is a list)
@param fromdoctype: the doctype from which parameters are to be cloned
@param todoctype: the doctype into which parameters are to be cloned
@param action: the action for which parameters are to be cloned
    @return: 2 if it was not possible to clone all parameters/values; 0 if all parameters/values were cloned successfully.
"""
error_code = clone_functionparameters_foraction_fromdoctype_todoctype(fromdoctype=fromdoctype, \
todoctype=todoctype, action=action)
if error_code in (1, 2):
## something went wrong and it was not possible to clone all parameters/values of "action"/"fromdoctype" for "action"/"todoctype"
## TODO : LOG ERRORS
user_msg.append("""It was not possible to clone all parameter values from the action "%(act)s" of the document type""" \
""" "%(fromdt)s" for the action "%(act)s" of the document type "%(todt)s".""" \
% { 'act' : action, 'fromdt' : fromdoctype, 'todt' : todoctype }
)
return 2 ## to signal that addition wasn't 100% successful
else:
return 0 ## all parameters were cloned
def _add_doctype(doctype, doctypename, doctypedescr, clonefrom):
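    """Add a new document type to WebSubmit, optionally cloning its categories and submissions
       from another document type.
    @param doctype: the unique ID to be given to the new document type
    @param doctypename: the name to be given to the new document type
    @param doctypedescr: the description to be given to the new document type
    @param clonefrom: the ID of an existing document type from which categories and submissions are to be
     cloned ("" or "None" if no cloning is to take place)
    @return: a tuple containing 2 strings: (page title, page body)
    """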
title = ""
body = ""
user_msg = []
commit_error = 0
if doctype == "":
user_msg.append("""The Document Type ID is mandatory and must be a string with no more than 10 alpha-numeric characters""")
commit_error = 1
if commit_error != 0:
## don't commit - just re-display page with message to user
(title, body) = _create_add_doctype_form(doctypename=doctypename, doctypedescr=doctypedescr, clonefrom=clonefrom, user_msg=user_msg)
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 0:
## this document type already exists - do not add
## TODO : LOG ERROR
user_msg.append("""A document type identified by "%s" already seems to exist and there cannot be added. Choose another ID.""" \
% (doctype,))
(title, body) = _create_add_doctype_form(doctypename=doctypename, doctypedescr=doctypedescr, clonefrom=clonefrom, user_msg=user_msg)
else:
## proceed with addition
## add the document type details:
error_code = insert_doctype_details(doctype=doctype, doctypename=doctypename, doctypedescr=doctypedescr)
if error_code == 0:
## added successfully
if clonefrom not in ("", "None", None):
## document type should be cloned from "clonefrom"
## first, clone the categories from another doctype:
error_code = _clone_categories_doctype(user_msg=user_msg,
fromdoctype=clonefrom,
todoctype=doctype)
## get details of clonefrom's submissions
all_actnames_submissions_clonefrom = get_actname_all_submissions_doctype(doctype=clonefrom)
if len(all_actnames_submissions_clonefrom) > 0:
## begin cloning
for doc_submission_actname in all_actnames_submissions_clonefrom:
## clone submission details:
action_name = doc_submission_actname[0]
_clone_submission_fromdoctype_todoctype(user_msg=user_msg,
todoctype=doctype, action=action_name, clonefrom=clonefrom)
user_msg.append("""The "%s" document type has been added.""" % (doctype,))
title = """Available WebSubmit Document Types"""
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
else:
## could not add document type details - do no more
## TODO : LOG ERROR!
user_msg.append("""Unable to add details for document type "%s".""" % (doctype,))
(title, body) = _create_add_doctype_form(user_msg=user_msg)
return (title, body)
def perform_request_add_doctype(doctype=None, doctypename=None, doctypedescr=None, clonefrom=None, doctypedetailscommit=""):
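    """Add a new document type to WebSubmit, or display the form used to add a new document type.
    @param doctype: the unique ID of the new document type
    @param doctypename: the name of the new document type
    @param doctypedescr: the description of the new document type
    @param clonefrom: the ID of a document type from which the new doctype may be cloned
    @param doctypedetailscommit: flag to signal that the details of the new doctype should be committed
    @return: a tuple containing 2 strings: (page title, page body)
    """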
body = ""
## wash args:
if doctype is not None:
try:
doctype = wash_single_urlarg(urlarg=doctype, argreqdtype=str, argdefault="", maxstrlen=10, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=doctype) == 0:
doctype = ""
except ValueError, e:
doctype = ""
else:
doctype = ""
if doctypename is not None:
try:
doctypename = wash_single_urlarg(urlarg=doctypename, argreqdtype=str, argdefault="")
except ValueError, e:
doctypename = ""
else:
doctypename = ""
if doctypedescr is not None:
try:
doctypedescr = wash_single_urlarg(urlarg=doctypedescr, argreqdtype=str, argdefault="")
except ValueError, e:
doctypedescr = ""
else:
doctypedescr = ""
if clonefrom is not None:
try:
clonefrom = wash_single_urlarg(urlarg=clonefrom, argreqdtype=str, argdefault="None")
except ValueError, e:
clonefrom = "None"
else:
clonefrom = "None"
if doctypedetailscommit not in ("", None):
(title, body) = _add_doctype(doctype=doctype,
doctypename=doctypename, doctypedescr=doctypedescr, clonefrom=clonefrom)
else:
(title, body) = _create_add_doctype_form()
return (title, body)
def _delete_referee_doctype(doctype, categid, refereeid):
"""Delete a referee from a given category of a document type.
@param doctype: the document type from whose category the referee is to be removed
@param categid: the name/ID of the category from which the referee is to be removed
@param refereeid: the id of the referee to be removed from the given category
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
role_name = """referee_%s_%s""" % (doctype, categid)
error_code = acc_delete_user_role(id_user=refereeid, name_role=role_name)
if error_code > 0:
## referee was deleted from category
user_msg.append(""" "%s".""" % (doctype,))
def _create_list_referees_doctype(doctype):
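    """Build the details of the referees of each category of a given document type, based upon the
       Invenio access-control roles named "referee_<doctype>_<categ>".
    @param doctype: the unique ID of the document type whose referees are to be listed
    @return: a dictionary keyed by referee role name, in which each value is a tuple of the form
     (category ID, category name, users of the role)
    """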
referees = {}
referees_details = {}
## get all Invenio roles:
all_roles = acc_get_all_roles()
for role in all_roles:
(roleid, rolename) = (role[0], role[1])
if re.match("^referee_%s_" % (doctype,), rolename):
## this is a "referee" role - get users of this role:
role_users = acc_get_role_users(roleid)
if role_users is not None and (type(role_users) in (tuple, list) and len(role_users) > 0):
## this role has users, record them in dictionary:
referees[rolename] = role_users
## for each "group" of referees:
for ref_role in referees.keys():
## get category ID for this referee-role:
try:
categid = re.match("^referee_%s_(.*)" % (doctype,), ref_role).group(1)
## from WebSubmit DB, get categ name for "categid":
if categid != "*":
categ_details = get_all_categories_sname_lname_for_doctype_categsname(doctype=doctype, categsname=categid)
if len(categ_details) > 0:
## if possible to receive details of this category, record them in a tuple in the format:
## ("categ name", (tuple of users details)):
referees_details[ref_role] = (categid, categ_details[0][1], referees[ref_role])
else:
## general referee entry:
referees_details[ref_role] = (categid, "General Referee(s)", referees[ref_role])
except AttributeError:
## there is no category for this role - it is broken, so pass it
pass
return referees_details
def _create_edit_doctype_details_form(doctype, doctypename="", doctypedescr="", doctypedetailscommit="", user_msg=""):
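    """Create the form used to edit the details (name and description) of a document type.
    @param doctype: the unique ID of the document type to be edited
    @param doctypename: the (possibly modified) name of the document type
    @param doctypedescr: the (possibly modified) description of the document type
    @param doctypedetailscommit: if not empty, redisplay the doctypename/doctypedescr values (used when a
     previous attempt to commit the details failed)
    @param user_msg: any message(s) to be displayed to the user
    @return: a tuple containing 2 strings: (page title, page body)
    """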
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
title = "Edit Document Type Details"
doctype_details = get_doctype_docname_descr_cd_md_fordoctype(doctype)
if len(doctype_details) == 1:
docname = doctype_details[0][1]
docdescr = doctype_details[0][2]
(cd, md) = (doctype_details[0][3], doctype_details[0][4])
if doctypedetailscommit != "":
## could not commit details
docname = doctypename
docdescr = doctypedescr
body = websubmitadmin_templates.tmpl_display_doctypedetails_form(doctype=doctype,
doctypename=docname,
doctypedescr=docdescr,
cd=cd,
md=md,
user_msg=user_msg,
perform_act="doctypeconfigure")
else:
## problem retrieving details of doctype:
user_msg.append("""Unable to retrieve details of doctype '%s' - cannot edit.""" % (doctype,),)
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
def _create_add_submission_choose_clonefrom_form(doctype, action, user_msg=""):
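    """Create the form used to choose a document type from which to clone the submission (action) that is
       to be added to a given document type.
    @param doctype: the unique ID of the document type to which the submission is to be added
    @param action: the unique ID of the action (submission) to be added
    @param user_msg: any message(s) to be displayed to the user
    @return: a tuple containing 2 strings: (page title, page body)
    """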
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
if action in ("", None):
user_msg.append("""Unknown Submission""")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## does this doctype already have this action?
numrows_doctype_action = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_doctype_action < 1:
## action not present for this doctype - can be added
## get list of all doctypes implementing this action (for possible cloning purposes)
doctypes_implementing_action = get_doctypeid_doctypes_implementing_action(action=action)
## create form to display document types to clone from
title = "Add Submission '%s' to Document Type '%s'" % (action, doctype)
body = websubmitadmin_templates.tmpl_display_submission_clone_form(doctype=doctype,
action=action,
clonefrom_list=doctypes_implementing_action,
user_msg=user_msg
)
else:
        ## warn user that action already exists for doctype and cannot be added, then display all
## details of doctype again
user_msg.append("The Document Type '%s' already implements the Submission '%s' - cannot add it again" \
% (doctype, action))
## TODO : LOG WARNING
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_add_submission_form(doctype, action, displayed="", buttonorder="", statustext="",
level="", score="", stpage="", endtxt="", user_msg=""):
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
if action in ("", None):
user_msg.append("""Unknown Submission""")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
title = "Add Submission '%s' to Document Type '%s'" % (action, doctype)
body = websubmitadmin_templates.tmpl_display_submissiondetails_form(doctype=doctype,
action=action,
displayed=displayed,
buttonorder=buttonorder,
statustext=statustext,
level=level,
score=score,
stpage=stpage,
endtxt=endtxt,
user_msg=user_msg,
saveaction="add"
)
return (title, body)
def _create_delete_submission_form(doctype, action):
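    """Create the form used to confirm the deletion of a submission (action) from a document type.
    @param doctype: the unique ID of the document type from which the submission is to be deleted
    @param action: the unique ID of the submission to be deleted
    @return: a tuple containing 2 strings: (page title, page body)
    """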
user_msg = []
title = """Delete Submission "%s" from Document Type "%s" """ % (action, doctype)
numrows_doctypesubmission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_doctypesubmission > 0:
## submission exists: create form to delete it:
body = websubmitadmin_templates.tmpl_display_delete_doctypesubmission_form(doctype=doctype, action=action)
else:
## submission doesn't seem to exist. Display details of doctype only:
user_msg.append("""The Submission "%s" doesn't seem to exist for the Document Type "%s" - unable to delete it""" % (action, doctype))
## TODO : LOG ERRORS
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_edit_submission_form(doctype, action, user_msg=""):
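    """Create the form used to edit the details of an existing submission (action) of a document type.
    @param doctype: the unique ID of the document type whose submission is to be edited
    @param action: the unique ID of the submission to be edited
    @param user_msg: any message(s) to be displayed to the user
    @return: a tuple containing 2 strings: (page title, page body)
    """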
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
elif type(user_msg) in (str, unicode):
user_msg = [user_msg]
submission_details = get_submissiondetails_doctype_action(doctype=doctype, action=action)
numrows_submission_details = len(submission_details)
if numrows_submission_details == 1:
## correctly retrieved details of submission - display:
submission_details = stringify_listvars(submission_details)
displayed = submission_details[0][3]
buttonorder = submission_details[0][7]
statustext = submission_details[0][8]
level = submission_details[0][9]
score = submission_details[0][10]
stpage = submission_details[0][11]
endtxt = submission_details[0][12]
cd = submission_details[0][5]
md = submission_details[0][6]
title = "Edit Details of '%s' Submission of '%s' Document Type" % (action, doctype)
body = websubmitadmin_templates.tmpl_display_submissiondetails_form(doctype=doctype,
action=action,
displayed=displayed,
buttonorder=buttonorder,
statustext=statustext,
level=level,
score=score,
stpage=stpage,
endtxt=endtxt,
cd=cd,
md=md,
user_msg=user_msg
)
else:
if numrows_submission_details > 1:
## multiple rows for this submission - this is a key violation
user_msg.append("Found multiple rows for the Submission '%s' of the Document Type '%s'" \
% (action, doctype))
else:
## submission does not exist
user_msg.append("The Submission '%s' of the Document Type '%s' doesn't seem to exist." \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_edit_category_form(doctype, categid):
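    """Create the form used to edit the description of a category of a document type.
    @param doctype: the unique ID of the document type to which the category belongs
    @param categid: the unique ID of the category whose description is to be edited
    @return: a tuple containing 2 strings: (page title, page body)
    """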
title = "Edit Category Description"
categ_details = get_all_categories_sname_lname_for_doctype_categsname(doctype=doctype, categsname=categid)
if len(categ_details) == 1:
        ## display details
retrieved_categid=categ_details[0][0]
retrieved_categdescr=categ_details[0][1]
body = websubmitadmin_templates.tmpl_display_edit_category_form(doctype=doctype,
categid=retrieved_categid,
categdescr=retrieved_categdescr
)
else:
## problem retrieving details of categ
user_msg = """Unable to retrieve details of category '%s'""" % (categid,)
## TODO : LOG ERRORS
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_configure_doctype_form(doctype, jumpcategout="", user_msg=""):
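    """Create the main "configure document type" page, displaying the details, categories, submissions
       and referees of a given document type.
    @param doctype: the unique ID of the document type to be configured
    @param jumpcategout: the category selected to be moved to a new position in the category order (if any)
    @param user_msg: any message(s) to be displayed to the user
    @return: a tuple containing 2 strings: (page title, page body)
    """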
title = "Configure Document Type"
body = ""
if user_msg == "" or type(user_msg) not in (list, tuple, str, unicode):
user_msg = []
## get details of doctype:
doctype_details = get_doctype_docname_descr_cd_md_fordoctype(doctype)
docname = doctype_details[0][1]
docdescr = doctype_details[0][2]
(cd, md) = (doctype_details[0][3], doctype_details[0][4])
## get categories for doctype:
doctype_categs = get_all_category_details_for_doctype(doctype=doctype)
## get submissions for doctype:
doctype_submissions = get_submissiondetails_all_submissions_doctype(doctype=doctype)
## get list of actions that this doctype doesn't have:
unlinked_actions = get_actions_sname_lname_not_linked_to_doctype(doctype=doctype)
## get referees for doctype:
referees_dets = _create_list_referees_doctype(doctype=doctype)
body = websubmitadmin_templates.tmpl_configure_doctype_overview(doctype=doctype, doctypename=docname,
doctypedescr=docdescr, doctype_cdate=cd,
doctype_mdate=md, doctype_categories=doctype_categs,
jumpcategout=jumpcategout,
doctype_submissions=doctype_submissions,
doctype_referees=referees_dets,
add_actions_list=unlinked_actions,
user_msg=user_msg)
return (title, body)
def _clone_submission_fromdoctype_todoctype(user_msg, todoctype, action, clonefrom):
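    """Clone a submission (action) - along with its functions, function-parameter values and submission
       fields - from one document type into another.
    @param user_msg: any message(s) to be displayed to the user (this is a list)
    @param todoctype: the doctype into which the submission is to be cloned
    @param action: the action (submission) to be cloned
    @param clonefrom: the doctype from which the submission is to be cloned
    @return: None (warnings about any problems encountered are appended to user_msg)
    """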
## first, delete the submission from todoctype (if it exists):
error_code = delete_submissiondetails_doctype(doctype=todoctype, action=action)
if error_code == 0:
## could be deleted - now clone it
error_code = insert_submission_details_clonefrom_submission(addtodoctype=todoctype, action=action, clonefromdoctype=clonefrom)
if error_code == 0:
## submission inserted
## now clone functions:
error_code = _clone_functions_foraction_doctype(user_msg=user_msg, \
fromdoctype=clonefrom, todoctype=todoctype, action=action)
if error_code in (0, 2):
## no serious error - clone parameters:
error_code = _clone_functionparameters_foraction_fromdoctype_todoctype(user_msg=user_msg,
fromdoctype=clonefrom,
todoctype=todoctype,
action=action)
## now clone pages/elements
error_code = clone_submissionfields_from_doctypesubmission_to_doctypesubmission(fromsub="%s%s" % (action, clonefrom),
tosub="%s%s" % (action, todoctype))
if error_code == 1:
                ## could not delete all existing submission fields and therefore could not clone submission fields at all
## TODO : LOG ERROR
user_msg.append("""Unable to delete existing submission fields for Submission "%s" of Document Type "%s" - """ \
"""cannot clone submission fields!""" % (action, todoctype))
elif error_code == 2:
## could not clone all fields
## TODO : LOG ERROR
user_msg.append("""Unable to clone all submission fields for submission "%s" on Document Type "%s" from Document""" \
""" Type "%s" """ % (action, todoctype, clonefrom))
else:
## could not insert submission details!
user_msg.append("""Unable to successfully insert details of submission "%s" into Document Type "%s" - cannot clone from "%s" """ \
% (action, todoctype, clonefrom))
## TODO : LOG ERROR
else:
## could not delete details of existing submission (action) from 'todoctype' - cannot clone it as new
user_msg.append("""Unable to delete details of existing Submission "%s" from Document Type "%s" - cannot clone it from "%s" """ \
% (action, todoctype, clonefrom))
## TODO : LOG ERROR
def _add_submission_to_doctype_clone(doctype, action, clonefrom):
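    """Add a submission (action) to a document type by cloning it from another document type.
    @param doctype: the unique ID of the document type to which the submission is to be added
    @param action: the unique ID of the submission (action) to be added
    @param clonefrom: the unique ID of the document type from which the submission is to be cloned
    @return: a tuple containing 2 strings: (page title, page body)
    """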
user_msg = []
if action in ("", None) or clonefrom in ("", None):
user_msg.append("Unknown action or document type to clone from - cannot add submission")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## does action exist?
numrows_action = get_number_actions_with_actid(actid=action)
if numrows_action > 0:
## The action exists, but is it already implemented as a submission by doctype?
numrows_submission_doctype = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission_doctype > 0:
## this submission already exists for this document type - unable to add it again
user_msg.append("""The Submission "%s" already exists for Document Type "%s" - cannot add it again""" \
%(action, doctype))
## TODO : LOG ERROR
else:
## clone the submission
_clone_submission_fromdoctype_todoctype(user_msg=user_msg,
todoctype=doctype, action=action, clonefrom=clonefrom)
user_msg.append("""Cloning of Submission "%s" from Document Type "%s" has been carried out. You should not""" \
""" ignore any warnings that you may have seen.""" % (action, clonefrom))
## TODO : LOG WARNING OF NEW SUBMISSION CREATION BY CLONING
else:
## this action doesn't exist! cannot add a submission based upon it!
user_msg.append("The Action '%s' does not seem to exist in WebSubmit. Cannot add it as a Submission!" \
% (action))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _add_submission_to_doctype(doctype, action, displayed, buttonorder,
statustext, level, score, stpage, endtxt):
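    """Add a new submission (action), along with its details, to a document type.
    @param doctype: the unique ID of the document type to which the submission is to be added
    @param action: the unique ID of the submission (action) to be added
    @param displayed: displayed on main submission page? (Y/N)
    @param buttonorder: button order
    @param statustext: status text
    @param level: level
    @param score: score
    @param stpage: stpage
    @param endtxt: endtxt
    @return: a tuple containing 2 strings: (page title, page body)
    """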
user_msg = []
## does "action" exist?
numrows_action = get_number_actions_with_actid(actid=action)
if numrows_action < 1:
## this action does not exist! Can't add a submission based upon it!
user_msg.append("'%s' does not exist in WebSubmit as an Action! Unable to add this submission."\
% (action,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## Insert the new submission
error_code = insert_submission_details(doctype=doctype, action=action, displayed=displayed,
nbpg="0", buttonorder=buttonorder, statustext=statustext,
level=level, score=score, stpage=stpage, endtext=endtxt)
if error_code == 0:
## successful insert
user_msg.append("""'%s' Submission Successfully Added to Document Type '%s'""" % (action, doctype))
else:
## could not insert submission into doctype
user_msg.append("""Unable to Add '%s' Submission to '%s' Document Type""" % (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _delete_submission_from_doctype(doctype, action):
"""Delete a submission (action) from the document type identified by "doctype".
@param doctype: the unique ID of the document type from which the submission is to be deleted
    @param action: the action ID of the submission to be deleted from doctype
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if action in ("", None):
user_msg.append("Unknown action - cannot delete submission")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## delete fields for this submission:
error_code = delete_all_submissionfields_submission("""%s%s""" % (action, doctype) )
if error_code != 0:
## could not successfully delete all fields - report error
user_msg.append("""When deleting Submission "%s" from Document Type "%s", it wasn't possible to delete all Submission Fields""" \
% (action, doctype))
## TODO : LOG ERROR
## delete parameters for this submission:
error_code = delete_functionparameters_doctype_submission(doctype=doctype, action=action)
if error_code != 0:
## could not successfully delete all functions - report error
user_msg.append("""When deleting Submission "%s" from Document Type "%s", it wasn't possible to delete all Function Parameters""" \
% (action, doctype))
## TODO : LOG ERROR
## delete functions for this submission:
error_code = delete_all_functions_foraction_doctype(doctype=doctype, action=action)
if error_code != 0:
## could not successfully delete all functions - report error
user_msg.append("""When deleting Submission "%s" from Document Type "%s", it wasn't possible to delete all Functions""" \
% (action, doctype))
## TODO : LOG ERROR
## delete this submission itself:
error_code = delete_submissiondetails_doctype(doctype=doctype, action=action)
if error_code == 0:
## successful delete
user_msg.append("""The "%s" Submission has been deleted from the "%s" Document Type""" % (action, doctype))
else:
## could not delete category
user_msg.append("""Unable to successfully delete the "%s" Submission from the "%s" Document Type""" % (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _edit_submission_for_doctype(doctype, action, displayed, buttonorder,
statustext, level, score, stpage, endtxt):
"""Update the details of a given submission belonging to the document type identified by "doctype".
@param doctype: the unique ID of the document type for which the submission is to be updated
@param action: action name of the submission to be updated
@param displayed: displayed on main submission page? (Y/N)
@param buttonorder: button order
@param statustext: statustext
@param level: level
@param score: score
@param stpage: stpage
@param endtxt: endtxt
@return: a tuple of 2 strings: (page title, page body)
"""
user_msg = []
commit_error = 0
if action in ("", None):
user_msg.append("Unknown Action - cannot update submission")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = update_submissiondetails_doctype_action(doctype=doctype, action=action, displayed=displayed,
buttonorder=buttonorder, statustext=statustext, level=level,
score=score, stpage=stpage, endtxt=endtxt)
if error_code == 0:
## successful update
user_msg.append("'%s' Submission of '%s' Document Type updated." % (action, doctype) )
else:
## could not update
user_msg.append("Unable to update '%s' Submission of '%s' Document Type." % (action, doctype) )
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _edit_doctype_details(doctype, doctypename, doctypedescr):
"""Update the details (name and/or description) of a document type (identified by doctype.)
@param doctype: the unique ID of the document type to be updated
@param doctypename: the new/updated name for the doctype
@param doctypedescr: the new/updated description for the doctype
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
error_code = update_doctype_details(doctype=doctype, doctypename=doctypename, doctypedescr=doctypedescr)
if error_code == 0:
## successful update
user_msg.append("""'%s' Document Type Updated""" % (doctype,))
else:
## could not update
user_msg.append("""Unable to Update Doctype '%s'""" % (doctype,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _edit_category_for_doctype(doctype, categid, categdescr):
"""Edit the description of a given category (identified by categid), belonging to
the document type identified by doctype.
@param doctype: the unique ID of the document type for which the category is to be modified
@param categid: the unique category ID of the category to be modified
@param categdescr: the new description for the category
    @return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if categid in ("", None) or categdescr in ("", None):
## cannot edit unknown category!
user_msg.append("Category ID and Description are both mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = update_category_description_doctype_categ(doctype=doctype, categ=categid, categdescr=categdescr)
if error_code == 0:
## successful update
user_msg.append("""'%s' Category Description Successfully Updated""" % (categid,))
else:
## could not update category description
user_msg.append("""Unable to Description for Category '%s'""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _add_category_to_doctype(doctype, categid, categdescr):
"""Add a new category to the document type identified by "doctype".
Category ID, and category description are both mandatory.
@param doctype: the unique ID of the document type to which the category is to be added
@param categid: the unique category ID of the category to be added to doctype
@param categdescr: the description of the category to be added
    @return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if categid in ("", None) or categdescr in ("", None):
## cannot add unknown category!
user_msg.append("Category ID and Description are both mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = insert_category_into_doctype(doctype=doctype, categ=categid, categdescr=categdescr)
if error_code == 0:
## successful insert
user_msg.append("""'%s' Category Successfully Added""" % (categid,))
else:
## could not insert category into doctype
user_msg.append("""Unable to Add '%s' Category""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _delete_category_from_doctype(doctype, categid):
"""Delete a category (categid) from the document type identified by "doctype".
@param doctype: the unique ID of the document type from which the category is to be deleted
@param categid: the unique category ID of the category to be deleted from doctype
@return: a tuple containing 2 strings: (page title, page body)
"""
user_msg = []
if categid in ("", None):
## cannot delete unknown category!
user_msg.append("Category ID is mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
error_code = delete_category_doctype(doctype=doctype, categ=categid)
if error_code == 0:
## successful delete
user_msg.append("""'%s' Category Successfully Deleted""" % (categid,))
else:
## could not delete category
user_msg.append("""Unable to Delete '%s' Category""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _jump_category_to_new_score(doctype, jumpcategout, jumpcategin):
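    """Move (jump) a category from one position (score) to another within the category order of a document type.
    @param doctype: the unique ID of the document type whose category is to be moved
    @param jumpcategout: identifies the category to be moved (the source of the jump)
    @param jumpcategin: identifies the position to which the category is to be moved (the destination of the jump)
    @return: a tuple containing 2 strings: (page title, page body)
    """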
user_msg = []
if jumpcategout in ("", None) or jumpcategin in ("", None):
## need both jumpcategout and jumpcategin to move a category:
user_msg.append("Unable to move category - unknown source and/or destination score(s)")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## FIXME TODO:
error_code = move_category_to_new_score(doctype, jumpcategout, jumpcategin)
if error_code == 0:
## successful jump of category
user_msg.append("""Successfully Moved [%s] Category""" % (jumpcategout,))
else:
        ## could not move category
user_msg.append("""Unable to Move [%s] Category""" % (jumpcategout,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _move_category(doctype, categid, movecategup=""):
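    """Move a category up or down by one place in the category order (score) of a document type.
    @param doctype: the unique ID of the document type whose category is to be moved
    @param categid: the unique ID of the category to be moved
    @param movecategup: if not empty, move the category up in score; otherwise, move it down
    @return: a tuple containing 2 strings: (page title, page body)
    """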
user_msg = []
if categid in ("", None):
## cannot move unknown category!
user_msg.append("Cannot move an unknown category - category ID is mandatory")
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
if movecategup not in ("", None):
## move the category up in score:
error_code = move_category_by_one_place_in_score(doctype=doctype,
categsname=categid,
direction="up")
else:
## move the category down in score:
error_code = move_category_by_one_place_in_score(doctype=doctype,
categsname=categid,
direction="down")
if error_code == 0:
## successful move of category
user_msg.append("""[%s] Category Successfully Moved""" % (categid,))
else:
        ## could not move category
user_msg.append("""Unable to Move [%s] Category""" % (categid,))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype(doctype,
doctypename=None,
doctypedescr=None,
doctypedetailsedit="",
doctypedetailscommit="",
doctypecategoryadd="",
doctypecategoryedit="",
doctypecategoryeditcommit="",
doctypecategorydelete="",
doctypesubmissionadd="",
doctypesubmissiondelete="",
doctypesubmissiondeleteconfirm="",
doctypesubmissionedit="",
doctypesubmissionaddclonechosen="",
doctypesubmissionadddetailscommit="",
doctypesubmissioneditdetailscommit="",
categid=None,
categdescr=None,
movecategup=None,
movecategdown=None,
jumpcategout=None,
jumpcategin=None,
action=None,
doctype_cloneactionfrom=None,
displayed=None,
buttonorder=None,
statustext=None,
level=None,
score=None,
stpage=None,
endtxt=None
):
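    """Configure a WebSubmit document type: edit its details; add, edit, move or delete its categories;
       add, edit or delete its submissions. The operation performed is determined by the flag arguments
       (doctypedetailsedit, doctypedetailscommit, doctypecategoryadd, etc.); when no flag is set, the main
       "configure document type" page is displayed.
    @param doctype: the unique ID of the document type to be configured
    @return: a tuple containing 2 strings: (page title, page body)
    """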
user_msg = []
body = ""
if doctype is not None:
try:
doctype = wash_single_urlarg(urlarg=doctype, argreqdtype=str, argdefault="", maxstrlen=10, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=doctype) == 0:
doctype = ""
except ValueError, e:
doctype = ""
else:
doctype = ""
if action is not None:
try:
action = wash_single_urlarg(urlarg=action, argreqdtype=str, argdefault="", maxstrlen=3, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=action) == 0:
action = ""
except ValueError, e:
action = ""
else:
action = ""
if doctypename is not None:
try:
doctypename = wash_single_urlarg(urlarg=doctypename, argreqdtype=str, argdefault="")
except ValueError, e:
doctypename = ""
else:
doctypename = ""
if doctypedescr is not None:
try:
doctypedescr = wash_single_urlarg(urlarg=doctypedescr, argreqdtype=str, argdefault="")
except ValueError, e:
doctypedescr = ""
else:
doctypedescr = ""
if categid is not None:
try:
categid = wash_single_urlarg(urlarg=categid, argreqdtype=str, argdefault="")
except ValueError, e:
categid = ""
else:
categid = ""
if categdescr is not None:
try:
categdescr = wash_single_urlarg(urlarg=categdescr, argreqdtype=str, argdefault="")
except ValueError, e:
categdescr = ""
else:
categdescr = ""
if doctype_cloneactionfrom is not None:
try:
doctype_cloneactionfrom = wash_single_urlarg(urlarg=doctype_cloneactionfrom, argreqdtype=str, argdefault="", maxstrlen=10, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=doctype_cloneactionfrom) == 0:
doctype_cloneactionfrom = ""
except ValueError, e:
doctype_cloneactionfrom = ""
else:
doctype_cloneactionfrom = ""
if displayed is not None:
try:
displayed = wash_single_urlarg(urlarg=displayed, argreqdtype=str, argdefault="Y", maxstrlen=1, minstrlen=1)
except ValueError, e:
displayed = "Y"
else:
displayed = "Y"
if buttonorder is not None:
try:
buttonorder = wash_single_urlarg(urlarg=buttonorder, argreqdtype=int, argdefault="")
except ValueError, e:
buttonorder = ""
else:
buttonorder = ""
if level is not None:
try:
level = wash_single_urlarg(urlarg=level, argreqdtype=str, argdefault="", maxstrlen=1, minstrlen=1)
except ValueError, e:
level = ""
else:
level = ""
if score is not None:
try:
score = wash_single_urlarg(urlarg=score, argreqdtype=int, argdefault="")
except ValueError, e:
score = ""
else:
score = ""
if stpage is not None:
try:
stpage = wash_single_urlarg(urlarg=stpage, argreqdtype=int, argdefault="")
except ValueError, e:
stpage = ""
else:
stpage = ""
if statustext is not None:
try:
statustext = wash_single_urlarg(urlarg=statustext, argreqdtype=str, argdefault="")
except ValueError, e:
statustext = ""
else:
statustext = ""
if endtxt is not None:
try:
endtxt = wash_single_urlarg(urlarg=endtxt, argreqdtype=str, argdefault="")
except ValueError, e:
endtxt = ""
else:
endtxt = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## since doctype ID is OK, process doctype configuration request:
if doctypedetailsedit not in ("", None):
(title, body) = _create_edit_doctype_details_form(doctype=doctype)
elif doctypedetailscommit not in ("", None):
## commit updated document type details
(title, body) = _edit_doctype_details(doctype=doctype,
doctypename=doctypename, doctypedescr=doctypedescr)
elif doctypecategoryadd not in ("", None):
## add new category:
(title, body) = _add_category_to_doctype(doctype=doctype, categid=categid, categdescr=categdescr)
elif doctypecategoryedit not in ("", None):
## create form to update category description:
(title, body) = _create_edit_category_form(doctype=doctype,
categid=categid)
elif doctypecategoryeditcommit not in ("", None):
## commit updated category description:
(title, body) = _edit_category_for_doctype(doctype=doctype, categid=categid, categdescr=categdescr)
elif doctypecategorydelete not in ("", None):
## delete a category
(title, body) = _delete_category_from_doctype(doctype=doctype, categid=categid)
elif movecategup not in ("", None) or movecategdown not in ("", None):
## move a category up or down in score:
(title, body) = _move_category(doctype=doctype, categid=categid,
movecategup=movecategup)
elif jumpcategout not in ("", None) and jumpcategin not in ("", None):
## jump a category from one score to another:
(title, body) = _jump_category_to_new_score(doctype=doctype, jumpcategout=jumpcategout,
jumpcategin=jumpcategin)
elif doctypesubmissionadd not in ("", None):
## form displaying option of adding doctype:
(title, body) = _create_add_submission_choose_clonefrom_form(doctype=doctype, action=action)
elif doctypesubmissionaddclonechosen not in ("", None):
## add a submission. if there is a document type to be cloned from, then process clone;
## otherwise, present form with details of doctype
if doctype_cloneactionfrom in ("", None, "None"):
## no clone - present form into which details of new submission should be entered
(title, body) = _create_add_submission_form(doctype=doctype, action=action)
else:
## new submission should be cloned from doctype_cloneactionfrom
(title, body) = _add_submission_to_doctype_clone(doctype=doctype, action=action, clonefrom=doctype_cloneactionfrom)
elif doctypesubmissiondelete not in ("", None):
## create form to prompt for confirmation of deletion of a submission:
(title, body) = _create_delete_submission_form(doctype=doctype, action=action)
elif doctypesubmissiondeleteconfirm not in ("", None):
## process the deletion of a submission from the doctype concerned:
(title, body) = _delete_submission_from_doctype(doctype=doctype, action=action)
elif doctypesubmissionedit not in ("", None):
## create form to update details of a submission
(title, body) = _create_edit_submission_form(doctype=doctype, action=action)
elif doctypesubmissioneditdetailscommit not in ("", None):
## commit updated submission details:
(title, body) = _edit_submission_for_doctype(doctype=doctype, action=action,
displayed=displayed, buttonorder=buttonorder, statustext=statustext,
level=level, score=score, stpage=stpage, endtxt=endtxt)
elif doctypesubmissionadddetailscommit not in ("", None):
## commit new submission to doctype (not by cloning)
(title, body) = _add_submission_to_doctype(doctype=doctype, action=action,
displayed=displayed, buttonorder=buttonorder, statustext=statustext,
level=level, score=score, stpage=stpage, endtxt=endtxt)
else:
## default - display root of edit doctype
(title, body) = _create_configure_doctype_form(doctype=doctype, jumpcategout=jumpcategout)
return (title, body)
def _create_configure_doctype_submission_functions_form(doctype,
action,
movefromfunctionname="",
movefromfunctionstep="",
movefromfunctionscore="",
user_msg=""):
title = """Functions of the "%s" Submission of the "%s" Document Type:""" % (action, doctype)
submission_functions = get_functionname_step_score_allfunctions_doctypesubmission(doctype=doctype, action=action)
body = websubmitadmin_templates.tmpl_configuredoctype_display_submissionfunctions(doctype=doctype,
action=action,
movefromfunctionname=movefromfunctionname,
movefromfunctionstep=movefromfunctionstep,
movefromfunctionscore=movefromfunctionscore,
submissionfunctions=submission_functions,
user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_functions_add_function_form(doctype, action, addfunctionname="",
addfunctionstep="", addfunctionscore="", user_msg=""):
"""Create a form that allows a user to add a function a submission.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param addfunctionname: (string) the name of the function to be added to the submission (passed in case of page refresh)
@param addfunctionstep: (integer) the step of the submission into which the function is to be added (passed in case of
page refresh)
    @param addfunctionscore: (integer) the score at which the function is to be added (passed in case of page refresh)
@param user_msg: (string or list of strings) any message(s) to be displayed to the user
@return: (tuple) containing 2 strings - (page-title, HTML page-body)
"""
title = """Add a function to the [%s] submission of the [%s] document type""" % (action, doctype)
submission_functions = get_functionname_step_score_allfunctions_doctypesubmission(doctype=doctype, action=action)
## get names of all WebSubmit functions:
all_websubmit_functions = get_names_of_all_functions()
## put names into a list of single-element tuples, so that template can make HTML select list with them:
all_websubmit_functions = map(lambda x: (str(x),), all_websubmit_functions)
## create page body:
body = websubmitadmin_templates.tmpl_configuredoctype_add_submissionfunction(doctype=doctype,
action=action,
cursubmissionfunctions=submission_functions,
allWSfunctions=all_websubmit_functions,
addfunctionname=addfunctionname,
addfunctionstep=addfunctionstep,
addfunctionscore=addfunctionscore,
user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_functions_list_parameters_form(doctype,
action,
functionname,
user_msg=""):
title = """Parameters of the %s function, as used in the %s document type"""\
% (functionname, doctype)
funcparams = get_function_parameters(function=functionname)
if len(funcparams) > 0:
## get the values
paramslist = map(lambda x: str(x[0]), funcparams)
params = get_function_parameter_vals_doctype(doctype=doctype, paramlist=paramslist)
else:
params = ()
## params = get_parameters_name_and_value_for_function_of_doctype(doctype=doctype, function=functionname)
body = websubmitadmin_templates.tmpl_configuredoctype_list_functionparameters(doctype=doctype,
action=action,
function=functionname,
params=params,
user_msg=user_msg)
return (title, body)
def _update_submission_function_parameter_file(doctype, action, functionname,
paramname, paramfilecontent):
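    """Save new content to the file named by the value of a given function parameter. The file
       is written into the WebSubmit bibconvert configuration directory.
    @param doctype: (string) the unique ID of a document type
    @param action: (string) the unique ID of an action
    @param functionname: (string) the name of the function to which the parameter belongs
    @param paramname: (string) the name of the parameter whose value names the file
    @param paramfilecontent: (string) the new content to be written to the file
    @return: (tuple) containing 2 strings - (page-title, HTML page-body)
    """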
user_msg = []
## get the filename:
paramval_res = get_value_of_parameter_for_doctype(doctype=doctype, parameter=paramname)
if paramval_res is None:
## this parameter doesn't exist for this doctype!
user_msg.append("The parameter [%s] doesn't exist for the document type [%s]!" % (paramname, doctype))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
paramval = str(paramval_res)
filename = basename(paramval)
if filename == "":
## invalid filename
user_msg.append("[%s] is an invalid filename - cannot save details" % (paramval,))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
## save file:
try:
save_update_to_file(filepath="%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), filecontent=paramfilecontent)
except InvenioWebSubmitAdminWarningIOError, e:
## could not correctly update the file!
user_msg.append(str(e))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
## redisplay form
user_msg.append("""[%s] file updated""" % (filename,))
(title, body) = _create_configure_doctype_submission_functions_edit_parameter_file_form(doctype=doctype,
action=action,
functionname=functionname,
paramname=paramname,
user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_functions_edit_parameter_file_form(doctype,
action,
functionname,
paramname,
user_msg=""):
if type(user_msg) is not list:
user_msg = []
paramval_res = get_value_of_parameter_for_doctype(doctype=doctype, parameter=paramname)
if paramval_res is None:
## this parameter doesn't exist for this doctype!
user_msg.append("The parameter [%s] doesn't exist for the document type [%s]!" % (paramname, doctype))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname)
return (title, body)
paramval = str(paramval_res)
title = "Edit the [%s] file for the [%s] document type" % (paramval, doctype)
## get basename of file:
filecontent = ""
filename = basename(paramval)
if filename == "":
## invalid filename
user_msg.append("[%s] is an invalid filename" % (paramval,))
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
## try to read file contents:
if access("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), F_OK):
## file exists
if access("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), R_OK) and \
isfile("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename)):
## file is a regular file and is readable - get contents
filecontent = open("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename), "r").read()
else:
if not isfile("%s/%s" % (CFG_WEBSUBMIT_BIBCONVERTCONFIGDIR, filename)):
## file is not a regular file
user_msg.append("The parameter file [%s] is not regular file - unable to read" % (filename,))
else:
## file is not readable - error message
user_msg.append("The parameter file [%s] could not be read - check permissions" % (filename,))
## display page listing the parameters of this function:
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname,
user_msg=user_msg)
return (title, body)
else:
## file does not exist:
user_msg.append("The parameter file [%s] does not exist - it will be created" % (filename,))
## make page body:
body = websubmitadmin_templates.tmpl_configuredoctype_edit_functionparameter_file(doctype=doctype,
action=action,
function=functionname,
paramname=paramname,
paramfilename=filename,
paramfilecontent=filecontent,
user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_functions_edit_parameter_value_form(doctype,
action,
functionname,
paramname,
paramval="",
user_msg=""):
title = """Edit the value of the [%s] Parameter""" % (paramname,)
## get the parameter's value from the DB:
paramval_res = get_value_of_parameter_for_doctype(doctype=doctype, parameter=paramname)
if paramval_res is None:
## this parameter doesn't exist for this doctype!
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
                                                                                             functionname=functionname)
        return (title, body)
if paramval == "":
## use whatever retrieved paramval_res contains:
paramval = str(paramval_res)
body = websubmitadmin_templates.tmpl_configuredoctype_edit_functionparameter_value(doctype=doctype,
action=action,
function=functionname,
paramname=paramname,
paramval=paramval)
return (title, body)
def _update_submissionfunction_parameter_value(doctype, action, functionname, paramname, paramval):
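    """Update the value of a function parameter as used by a given document type.
    @param doctype: (string) the unique ID of a document type
    @param action: (string) the unique ID of an action
    @param functionname: (string) the name of the function to which the parameter belongs
    @param paramname: (string) the name of the parameter whose value is to be updated
    @param paramval: (string) the new value of the parameter
    @return: (tuple) containing 2 strings - (page-title, HTML page-body)
    """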
user_msg = []
try:
update_value_of_function_parameter_for_doctype(doctype=doctype, paramname=paramname, paramval=paramval)
user_msg.append("""The value of the parameter [%s] was updated for document type [%s]""" % (paramname, doctype))
except InvenioWebSubmitAdminWarningTooManyRows, e:
## multiple rows found for param - update not carried out
user_msg.append(str(e))
except InvenioWebSubmitAdminWarningNoRowsFound, e:
## no rows found - parameter does not exist for doctype, therefore no update
user_msg.append(str(e))
(title, body) = \
_create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype, action=action,
functionname=functionname, user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype_submissionfunctions_parameters(doctype,
action,
functionname,
functionstep,
functionscore,
paramname="",
paramval="",
editfunctionparametervalue="",
editfunctionparametervaluecommit="",
editfunctionparameterfile="",
editfunctionparameterfilecommit="",
paramfilename="",
paramfilecontent=""):
body = ""
user_msg = []
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
if editfunctionparametervaluecommit not in ("", None):
## commit an update to a function parameter:
(title, body) = _update_submissionfunction_parameter_value(doctype=doctype, action=action, functionname=functionname,
paramname=paramname, paramval=paramval)
elif editfunctionparametervalue not in ("", None):
## display a form for editing the value of a parameter:
(title, body) = _create_configure_doctype_submission_functions_edit_parameter_value_form(doctype=doctype,
action=action,
functionname=functionname,
paramname=paramname,
paramval=paramval)
elif editfunctionparameterfile not in ("", None):
## display a form for editing the contents of a file, named by the parameter's value:
(title, body) = _create_configure_doctype_submission_functions_edit_parameter_file_form(doctype=doctype,
action=action,
functionname=functionname,
paramname=paramname)
elif editfunctionparameterfilecommit not in ("", None):
(title, body) = _update_submission_function_parameter_file(doctype=doctype, action=action, functionname=functionname,
paramname=paramname, paramfilecontent=paramfilecontent)
else:
## default - display list of parameters for function:
(title, body) = _create_configure_doctype_submission_functions_list_parameters_form(doctype=doctype,
action=action,
functionname=functionname)
return (title, body)
def perform_request_configure_doctype_submissionfunctions(doctype,
action,
moveupfunctionname="",
moveupfunctionstep="",
moveupfunctionscore="",
movedownfunctionname="",
movedownfunctionstep="",
movedownfunctionscore="",
movefromfunctionname="",
movefromfunctionstep="",
movefromfunctionscore="",
movetofunctionname="",
movetofunctionstep="",
movetofunctionscore="",
deletefunctionname="",
deletefunctionstep="",
deletefunctionscore="",
configuresubmissionaddfunction="",
configuresubmissionaddfunctioncommit="",
addfunctionname="",
addfunctionstep="",
addfunctionscore=""):
body = ""
user_msg = []
if addfunctionstep != "":
try:
addfunctionstep = str(wash_single_urlarg(urlarg=addfunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
addfunctionstep = ""
if addfunctionscore != "":
try:
addfunctionscore = str(wash_single_urlarg(urlarg=addfunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
addfunctionscore = ""
if deletefunctionstep != "":
try:
deletefunctionstep = str(wash_single_urlarg(urlarg=deletefunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
deletefunctionstep = ""
if deletefunctionscore != "":
try:
deletefunctionscore = str(wash_single_urlarg(urlarg=deletefunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
deletefunctionscore = ""
if movetofunctionstep != "":
try:
movetofunctionstep = str(wash_single_urlarg(urlarg=movetofunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
movetofunctionstep = ""
if movetofunctionscore != "":
try:
movetofunctionscore = str(wash_single_urlarg(urlarg=movetofunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
movetofunctionscore = ""
if moveupfunctionstep != "":
try:
moveupfunctionstep = str(wash_single_urlarg(urlarg=moveupfunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
moveupfunctionstep = ""
if moveupfunctionscore != "":
try:
moveupfunctionscore = str(wash_single_urlarg(urlarg=moveupfunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
moveupfunctionscore = ""
if movedownfunctionstep != "":
try:
movedownfunctionstep = str(wash_single_urlarg(urlarg=movedownfunctionstep, argreqdtype=int, argdefault=""))
except ValueError, e:
movedownfunctionstep = ""
if movedownfunctionscore != "":
try:
movedownfunctionscore = str(wash_single_urlarg(urlarg=movedownfunctionscore, argreqdtype=int, argdefault=""))
except ValueError, e:
movedownfunctionscore = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## submission valid
if movefromfunctionname != "" and movefromfunctionstep != "" and movefromfunctionscore != "" and \
movetofunctionname != "" and movetofunctionstep != "" and movetofunctionscore != "":
## process moving the function by jumping it to another position
try:
move_submission_function_from_one_position_to_another_position(doctype=doctype, action=action,
movefuncname=movefromfunctionname,
movefuncfromstep=movefromfunctionstep,
movefuncfromscore=movefromfunctionscore,
movefunctostep=movetofunctionstep,
movefunctoscore=movetofunctionscore)
user_msg.append("""The function [%s] at step [%s], score [%s] was successfully moved."""\
% (movefromfunctionname, movefromfunctionstep, movefromfunctionscore))
except Exception, e:
## there was a problem
user_msg.append(str(e))
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif moveupfunctionname != "" and moveupfunctionstep != "" and moveupfunctionscore != "":
## process moving the function up one position
error_code = move_position_submissionfunction_up(doctype=doctype,
action=action,
function=moveupfunctionname,
funccurstep=moveupfunctionstep,
funccurscore=moveupfunctionscore)
if error_code == 0:
## success
user_msg.append("""The Function "%s" that was located at step %s, score %s, has been moved upwards""" \
% (moveupfunctionname, moveupfunctionstep, moveupfunctionscore))
else:
## could not move it
user_msg.append("""Unable to move the Function "%s" that is located at step %s, score %s""" \
% (moveupfunctionname, moveupfunctionstep, moveupfunctionscore))
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif movedownfunctionname != "" and movedownfunctionstep != "" and movedownfunctionscore != "":
## process moving the function down one position
error_code = move_position_submissionfunction_down(doctype=doctype,
action=action,
function=movedownfunctionname,
funccurstep=movedownfunctionstep,
funccurscore=movedownfunctionscore)
if error_code == 0:
## success
user_msg.append("""The Function "%s" that was located at step %s, score %s, has been moved downwards""" \
% (movedownfunctionname, movedownfunctionstep, movedownfunctionscore))
else:
## could not move it
user_msg.append("""Unable to move the Function "%s" that is located at step %s, score %s""" \
% (movedownfunctionname, movedownfunctionstep, movedownfunctionscore))
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif deletefunctionname != "" and deletefunctionstep != "" and deletefunctionscore != "":
## process deletion of function from the given position
(title, body) = _delete_submission_function(doctype=doctype, action=action, deletefunctionname=deletefunctionname,
deletefunctionstep=deletefunctionstep, deletefunctionscore=deletefunctionscore)
elif configuresubmissionaddfunction != "":
## display a form that allows the addition of a new WebSubmit function
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action)
elif configuresubmissionaddfunctioncommit != "":
## process the addition of the new WebSubmit function to the submission:
(title, body) = _add_function_to_submission(doctype=doctype, action=action, addfunctionname=addfunctionname,
addfunctionstep=addfunctionstep, addfunctionscore=addfunctionscore)
else:
## default - display functions for this submission
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype,
action=action,
movefromfunctionname=movefromfunctionname,
movefromfunctionstep=movefromfunctionstep,
movefromfunctionscore=movefromfunctionscore
)
return (title, body)
def _add_function_to_submission(doctype, action, addfunctionname, addfunctionstep, addfunctionscore):
"""Process the addition of a function to a submission.
The user can decide in which step and at which score to insert the function.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param addfunctionname: (string) the name of the function to be added to the submission
@param addfunctionstep: (integer) the step at which the function is to be added
@param addfunctionscore: (integer) the score at which the function is to be added
@return: a tuple containing 2 strings: (page-title, page-body)
"""
user_msg = []
if addfunctionname == "" or addfunctionstep == "" or addfunctionscore == "":
## invalid details!
user_msg.append("""Invalid function coordinates supplied!""")
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action,
user_msg=user_msg)
return (title, body)
try:
if int(addfunctionstep) < 1 or int(addfunctionscore) < 1:
## invalid details!
user_msg.append("""Invalid function step and/or score!""")
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action,
user_msg=user_msg)
return (title, body)
except ValueError:
user_msg.append("""Invalid function step and/or score!""")
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action,
                                                                                         user_msg=user_msg)
        return (title, body)
try:
insert_function_into_submission_at_step_and_score_then_regulate_scores_of_functions_in_step(doctype=doctype,
action=action,
function=addfunctionname,
step=addfunctionstep,
score=addfunctionscore)
except InvenioWebSubmitAdminWarningReferentialIntegrityViolation, e:
## Function didn't exist in WebSubmit! Not added to submission.
user_msg.append(str(e))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_submission_functions_add_function_form(doctype=doctype,
action=action,
addfunctionstep=addfunctionstep,
addfunctionscore=addfunctionscore,
user_msg=user_msg)
return (title, body)
except InvenioWebSubmitAdminWarningInsertFailed, e:
## insert failed - some functions within the step may have been corrupted!
user_msg.append(str(e))
## TODO : LOG ERROR
(title, body) = \
_create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
except InvenioWebSubmitAdminWarningDeleteFailed, e:
## when regulating the scores of functions within the step, could not delete some or all of the functions
## within the step that the function was added to. Some functions may have been lost!
user_msg.append(str(e))
## TODO : LOG ERROR
(title, body) = \
_create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## Successfully added
user_msg.append("""The function [%s] has been added to submission [%s] at step [%s], score [%s]."""\
% (addfunctionname, "%s%s" % (action, doctype), addfunctionstep, addfunctionscore))
(title, body) = \
_create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
def _delete_submission_function(doctype, action, deletefunctionname, deletefunctionstep, deletefunctionscore):
"""Delete a submission function from a given submission. Re-order all functions below it (within the same step)
to fill the gap left by the deleted function.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param deletefunctionname: (string) the name of the function to be deleted from the submission
@param deletefunctionstep: (string) the step of the function to be deleted from the submission
@param deletefunctionscore: (string) the score of the function to be deleted from the submission
@return: tuple containing 2 strings: (page-title, page-body)
"""
user_msg = []
## first, delete the function:
try:
delete_function_at_step_and_score_from_submission(doctype=doctype, action=action,
function=deletefunctionname, step=deletefunctionstep,
score=deletefunctionscore)
except InvenioWebSubmitAdminWarningDeleteFailed, e:
## unable to delete function - error message and return
user_msg.append("""Unable to delete function [%s] at step [%s], score [%s] from submission [%s]""" \
% (deletefunctionname, deletefunctionstep, deletefunctionscore, "%s%s" % (action, doctype)))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## now, correct the scores of all functions in the step from which a function was just deleted:
try:
regulate_score_of_all_functions_in_step_to_ascending_multiples_of_10_for_submission(doctype=doctype,
action=action,
step=deletefunctionstep)
except InvenioWebSubmitAdminWarningDeleteFailed, e:
        ## couldn't delete the functions before re-ordering them
user_msg.append("""Deleted function [%s] at step [%s], score [%s] from submission [%s], but could not re-order""" \
""" scores of remaining functions within step [%s]""" \
% (deletefunctionname, deletefunctionstep, deletefunctionscore,
"%s%s" % (action, doctype), deletefunctionstep))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## update submission "last-modification" date:
update_modification_date_for_submission(doctype=doctype, action=action)
## success message:
user_msg.append("""Successfully deleted function [%s] at step [%s], score [%s] from submission [%s]""" \
% (deletefunctionname, deletefunctionstep, deletefunctionscore, "%s%s" % (action, doctype)))
## TODO : LOG function Deletion
(title, body) = _create_configure_doctype_submission_functions_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype_submissionpage_preview(doctype, action, pagenum):
"""Display a preview of a Submission Page and its fields.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the submission page to be previewed
    @return: a tuple containing 2 strings: (page-title, page-body)
"""
body = ""
user_msg = []
try:
pagenum = str(pagenum)
except ValueError:
pagenum = ""
if pagenum != "":
try:
pagenum = str(wash_single_urlarg(urlarg=pagenum, argreqdtype=int, argdefault=""))
except ValueError, e:
pagenum = ""
## ensure that the page number for this submission is valid:
num_pages_submission = get_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
try:
if not (int(pagenum) > 0 and int(pagenum) <= num_pages_submission):
user_msg.append("Invalid page number - out of range")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
except ValueError:
## invalid page number
user_msg.append("Invalid page number - must be an integer value!")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## get details of all fields on submission page:
fields = get_details_and_description_of_all_fields_on_submissionpage(doctype=doctype, action=action, pagenum=pagenum)
## ensure all values for each field are strings:
string_fields = []
for field in fields:
string_fields.append(stringify_list_elements(field))
title = """A preview of Page %s of the %s Submission""" % (pagenum, "%s%s" % (action, doctype))
body = websubmitadmin_templates.tmpl_configuredoctype_display_submissionpage_preview(doctype=doctype,
action=action,
pagenum=pagenum,
fields=string_fields)
return (title, body)
def perform_request_configure_doctype_submissionpage_elements(doctype, action, pagenum, movefieldfromposn="",
movefieldtoposn="", deletefieldposn="", editfieldposn="",
editfieldposncommit="", fieldname="", fieldtext="", fieldlevel="",
fieldshortdesc="", fieldcheck="", addfield="", addfieldcommit=""):
"""Process requests relating to the elements of a particular submission page"""
body = ""
user_msg = []
try:
pagenum = str(pagenum)
except ValueError:
pagenum = ""
if pagenum != "":
try:
pagenum = str(wash_single_urlarg(urlarg=pagenum, argreqdtype=int, argdefault=""))
except ValueError, e:
pagenum = ""
if fieldname != "":
try:
fieldname = wash_single_urlarg(urlarg=fieldname, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=fieldname) == 0:
fieldname = ""
except ValueError, e:
fieldname = ""
if fieldtext != "":
try:
fieldtext = wash_single_urlarg(urlarg=fieldtext, argreqdtype=str, argdefault="")
except ValueError, e:
fieldtext = ""
if fieldlevel != "":
try:
fieldlevel = wash_single_urlarg(urlarg=fieldlevel, argreqdtype=str, argdefault="O", maxstrlen=1, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=fieldlevel) == 0:
fieldlevel = "O"
if fieldlevel not in ("m", "M", "o", "O"):
fieldlevel = "O"
fieldlevel = fieldlevel.upper()
except ValueError, e:
fieldlevel = "O"
if fieldshortdesc != "":
try:
fieldshortdesc = wash_single_urlarg(urlarg=fieldshortdesc, argreqdtype=str, argdefault="")
except ValueError, e:
fieldshortdesc = ""
if fieldcheck != "":
try:
fieldcheck = wash_single_urlarg(urlarg=fieldcheck, argreqdtype=str, argdefault="", maxstrlen=15, minstrlen=1)
if string_is_alphanumeric_including_underscore(txtstring=fieldcheck) == 0:
fieldcheck = ""
except ValueError, e:
fieldcheck = ""
if editfieldposn != "":
try:
editfieldposn = str(wash_single_urlarg(urlarg=editfieldposn, argreqdtype=int, argdefault=""))
except ValueError, e:
editfieldposn = ""
if deletefieldposn != "":
try:
deletefieldposn = str(wash_single_urlarg(urlarg=deletefieldposn, argreqdtype=int, argdefault=""))
except ValueError, e:
deletefieldposn = ""
if movefieldfromposn != "":
try:
movefieldfromposn = str(wash_single_urlarg(urlarg=movefieldfromposn, argreqdtype=int, argdefault=""))
except ValueError, e:
movefieldfromposn = ""
if movefieldtoposn != "":
try:
movefieldtoposn = str(wash_single_urlarg(urlarg=movefieldtoposn, argreqdtype=int, argdefault=""))
except ValueError, e:
movefieldtoposn = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## ensure that the page number for this submission is valid:
num_pages_submission = get_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
try:
if not (int(pagenum) > 0 and int(pagenum) <= num_pages_submission):
user_msg.append("Invalid page number - out of range")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
except ValueError:
## invalid page number
user_msg.append("Invalid page number - must be an integer value!")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
## submission valid
if editfieldposn != "" and editfieldposncommit == "":
## display form for editing field
(title, body) = _configure_doctype_edit_field_on_submissionpage_display_field_details(doctype=doctype, action=action,
pagenum=pagenum, fieldposn=editfieldposn)
elif editfieldposn != "" and editfieldposncommit != "":
## commit changes to element
(title, body) = _configure_doctype_edit_field_on_submissionpage(doctype=doctype, action=action,
pagenum=pagenum, fieldposn=editfieldposn, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc, fieldcheck=fieldcheck)
elif movefieldfromposn != "" and movefieldtoposn != "":
## move a field
(title, body) = _configure_doctype_move_field_on_submissionpage(doctype=doctype,
action=action, pagenum=pagenum, movefieldfromposn=movefieldfromposn,
movefieldtoposn=movefieldtoposn)
elif addfield != "":
## request to add a new field to a page - display form
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype=doctype, action=action, pagenum=pagenum)
elif addfieldcommit != "":
## commit a new field to the page
(title, body) = _configure_doctype_add_field_to_submissionpage(doctype=doctype, action=action,
pagenum=pagenum, fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc, fieldcheck=fieldcheck)
elif deletefieldposn != "":
## user wishes to delete a field from the page:
(title, body) = _configure_doctype_delete_field_from_submissionpage(doctype=doctype,
action=action, pagenum=pagenum, fieldnum=deletefieldposn)
else:
## default visit to page - list its elements:
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action,
pagenum=pagenum, movefieldfromposn=movefieldfromposn)
return (title, body)
def stringify_list_elements(elementslist):
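    """Return a copy of a list in which every element has been cast to a string.
    @param elementslist: (list) the elements to be stringified
    @return: (list) of strings
    """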
o = []
for el in elementslist:
o.append(str(el))
return o
def _configure_doctype_edit_field_on_submissionpage(doctype, action, pagenum, fieldposn,
fieldtext, fieldlevel, fieldshortdesc, fieldcheck):
"""Perform an update to the details of a field on a submission page.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the page on which the element to be updated is found
    @param fieldposn: (integer) the numeric position of the field to be edited
@param fieldtext: (string) the text label displayed with a field
@param fieldlevel: (char) M or O (whether the field is mandatory or optional)
@param fieldshortdesc: (string) the short description of a field
@param fieldcheck: (string) the name of a JavaScript check to be applied to a field
@return: a tuple containing 2 strings - (page-title, page-body)
"""
user_msg = []
if fieldcheck not in ("", None):
## ensure check exists:
checkres = get_number_jschecks_with_chname(chname=fieldcheck)
if checkres < 1:
user_msg.append("The Check '%s' does not exist in WebSubmit - changes to field not saved" % (fieldcheck,))
(title, body) = _configure_doctype_edit_field_on_submissionpage_display_field_details(doctype=doctype, action=action,
pagenum=pagenum, fieldposn=fieldposn,
fieldtext=fieldtext, fieldlevel=fieldlevel,
fieldshortdesc=fieldshortdesc, user_msg=user_msg)
return (title, body)
try:
update_details_of_a_field_on_a_submissionpage(doctype=doctype, action=action, pagenum=pagenum, fieldposn=fieldposn,
fieldtext=fieldtext, fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck)
user_msg.append("The details of the field at position %s have been updated successfully" % (fieldposn,))
update_modification_date_for_submission(doctype=doctype, action=action)
except InvenioWebSubmitAdminWarningTooManyRows, e:
## multiple rows found at page position - not safe to edit:
user_msg.append("Unable to update details of field at position %s on submission page %s - multiple fields found at this position" \
% (fieldposn, pagenum))
## TODO : LOG WARNING
except InvenioWebSubmitAdminWarningNoRowsFound, e:
## field not found - cannot edit
user_msg.append("Unable to update details of field at position %s on submission page %s - field doesn't seem to exist there!" \
% (fieldposn, pagenum))
## TODO : LOG WARNING
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
return (title, body)
def _configure_doctype_edit_field_on_submissionpage_display_field_details(doctype, action, pagenum, fieldposn,
fieldtext=None, fieldlevel=None, fieldshortdesc=None,
fieldcheck=None, user_msg=""):
"""Display a form used to edit a field on a submission page.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the page on which the element to be updated is found
    @param fieldposn: (integer) the numeric position of the field to be edited
@param fieldtext: (string) the text label displayed with a field
@param fieldlevel: (char) M or O (whether the field is mandatory or optional)
@param fieldshortdesc: (string) the short description of a field
@param fieldcheck: (string) the name of a JavaScript check to be applied to a field
@param user_msg: (list of strings, or string) any warning/error message to be displayed to the user
@return: a tuple containing 2 strings (page-title, page-body)
"""
if type(user_msg) not in (list, tuple) or user_msg == "":
user_msg = []
## get a list of all check names:
checks_res = get_all_jscheck_names()
    allchecks = []
for check in checks_res:
allchecks.append((check,))
## get the details for the field to be edited:
fielddets = get_details_of_field_at_positionx_on_submissionpage(doctype=doctype, action=action, pagenum=pagenum, fieldposition=fieldposn)
if len(fielddets) < 1:
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum)
return (title, body)
fieldname = str(fielddets[2])
if fieldtext is not None:
fieldtext = str(fieldtext)
else:
fieldtext = str(fielddets[3])
if fieldlevel is not None:
fieldlevel = str(fieldlevel)
else:
fieldlevel = str(fielddets[4])
if fieldshortdesc is not None:
fieldshortdesc = str(fieldshortdesc)
else:
fieldshortdesc = str(fielddets[5])
if fieldcheck is not None:
fieldcheck = str(fieldcheck)
else:
fieldcheck = str(fielddets[6])
cd = str(fielddets[7])
md = str(fielddets[8])
title = """Edit the %(fieldname)s field as it appears at position %(fieldnum)s on Page %(pagenum)s of the %(submission)s Submission""" \
% { 'fieldname' : fieldname, 'fieldnum' : fieldposn, 'pagenum' : pagenum, 'submission' : "%s%s" % (action, doctype) }
body = websubmitadmin_templates.tmpl_configuredoctype_edit_submissionfield(doctype=doctype,
action=action,
pagenum=pagenum,
fieldnum=fieldposn,
fieldname=fieldname,
fieldtext=fieldtext,
fieldlevel=fieldlevel,
fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck,
cd=cd,
md=md,
allchecks=allchecks,
user_msg=user_msg)
return (title, body)
def _configure_doctype_add_field_to_submissionpage(doctype, action, pagenum, fieldname="",
fieldtext="", fieldlevel="", fieldshortdesc="", fieldcheck=""):
"""Add a field to a submission page.
@param doctype: (string) the unique ID of a document type
@param action: (string) the unique ID of an action
@param pagenum: (integer) the number of the page on which the element to be updated is found
@param fieldname: (string) the name of the field to be added to the page
@param fieldtext: (string) the text label displayed with a field
@param fieldlevel: (char) M or O (whether the field is mandatory or optional)
@param fieldshortdesc: (string) the short description of a field
@param fieldcheck: (string) the name of a JavaScript check to be applied to a field
@return: a tuple containing 2 strings - (page-title, page-body)
"""
user_msg = []
## ensure that a field "fieldname" actually exists:
if fieldname == "":
## the field to be added has no element description in the WebSubmit DB - cannot add
user_msg.append("""The field that you have chosen to add does not seem to exist in WebSubmit - cannot add""")
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck, user_msg=user_msg)
return (title, body)
numelements_elname = get_number_elements_with_elname(elname=fieldname)
if numelements_elname < 1:
## the field to be added has no element description in the WebSubmit DB - cannot add
user_msg.append("""The field that you have chosen to add (%s) does not seem to exist in WebSubmit - cannot add""" % (fieldname,))
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck, user_msg=user_msg)
return (title, body)
## if fieldcheck has been provided, ensure that it is a valid check name:
if fieldcheck not in ("", None):
## ensure check exists:
checkres = get_number_jschecks_with_chname(chname=fieldcheck)
if checkres < 1:
user_msg.append("The Check '%s' does not exist in WebSubmit - new field not saved to page" % (fieldcheck,))
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
user_msg=user_msg)
return (title, body)
## now add the new field to the page:
try:
insert_field_onto_submissionpage(doctype=doctype, action=action, pagenum=pagenum, fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc, fieldcheck=fieldcheck)
user_msg.append("""Successfully added the field "%s" to the last position on page %s of submission %s""" \
% (fieldname, pagenum, "%s%s" % (action, doctype)))
update_modification_date_for_submission(doctype=doctype, action=action)
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
except InvenioWebSubmitAdminWarningInsertFailed, e:
## the insert of the new field failed for some reason
## TODO : LOG ERROR
user_msg.append("""Couldn't add the field "%s" to page %s of submission %s - please try again""" \
% (fieldname, pagenum, "%s%s" % (action, doctype)))
(title, body) = _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum,
fieldname=fieldname, fieldtext=fieldtext,
fieldlevel=fieldlevel, fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck, user_msg=user_msg)
return (title, body)
def _configure_doctype_add_field_to_submissionpage_display_form(doctype, action, pagenum, fieldname="", fieldtext="",
fieldlevel="", fieldshortdesc="", fieldcheck="", user_msg=""):
title = """Add a Field to Page %(pagenum)s of the %(submission)s Submission""" \
% { 'pagenum' : pagenum, 'submission' : "%s%s" % (action, doctype) }
## sanity checking:
if type(user_msg) not in (list, tuple) or user_msg == "":
user_msg = []
## get a list of all check names:
checks_res = get_all_jscheck_names()
    allchecks = []
for check in checks_res:
allchecks.append((check,))
## get a list of all WebSubmit element names:
elements_res = get_all_element_names()
allelements = []
for el in elements_res:
allelements.append((el,))
## get form:
body = websubmitadmin_templates.tmpl_configuredoctype_add_submissionfield(doctype=doctype,
action=action,
pagenum=pagenum,
fieldname=fieldname,
fieldtext=fieldtext,
fieldlevel=fieldlevel,
fieldshortdesc=fieldshortdesc,
fieldcheck=fieldcheck,
allchecks=allchecks,
allelements=allelements,
user_msg=user_msg)
return (title, body)
def _configure_doctype_move_field_on_submissionpage(doctype, action, pagenum, movefieldfromposn, movefieldtoposn):
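    """Move a field on a submission page from one position to another and report the outcome
       to the user.
    @param doctype: (string) the unique ID of a document type
    @param action: (string) the unique ID of an action
    @param pagenum: (integer) the number of the page on which the field is to be moved
    @param movefieldfromposn: (integer) the current position of the field
    @param movefieldtoposn: (integer) the position to which the field is to be moved
    @return: (tuple) containing 2 strings - (page-title, page-body)
    """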
user_msg = []
_ = gettext_set_language(CFG_SITE_LANG)
movefield_res = move_field_on_submissionpage_from_positionx_to_positiony(doctype=doctype, action=action, pagenum=pagenum,
movefieldfrom=movefieldfromposn, movefieldto=movefieldtoposn)
if movefield_res == 1:
## invalid field numbers
try:
raise InvenioWebSubmitWarning(_('Unable to move field at position %s to position %s on page %s of submission \'%s%s\' - Invalid Field Position Numbers') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_INVALID_FIELD_NUMBERS_SUPPLIED_WHEN_TRYING_TO_MOVE_FIELD_ON_SUBMISSION_PAGE', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype)))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - field position numbers invalid""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 2:
## failed to swap 2 fields - couldn't move field1 to temp position
try:
            raise InvenioWebSubmitWarning(_('Unable to swap field at position %s with field at position %s on page %s of submission %s%s - could not move field at position %s to temporary field location') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype, movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_SWAP_TWO_FIELDS_ON_SUBMISSION_PAGE_COULDNT_MOVE_FIELD1_TO_TEMP_POSITION', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype)))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 3:
## failed to swap 2 fields on submission page - couldn't move field2 to field1 position
try:
            raise InvenioWebSubmitWarning(_('Unable to swap field at position %s with field at position %s on page %s of submission %s%s - could not move field at position %s to position %s. Please ask Admin to check that a field was not stranded in a temporary position') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype, movefieldtoposn, movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_SWAP_TWO_FIELDS_ON_SUBMISSION_PAGE_COULDNT_MOVE_FIELD2_TO_FIELD1_POSITION', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldtoposn, movefieldfromposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - See Admin if field order is broken""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 4:
## failed to swap 2 fields in submission page - couldnt swap field at temp position to field2 position
try:
            raise InvenioWebSubmitWarning(_('Unable to swap field at position %s with field at position %s on page %s of submission %s%s - could not move field that was located at position %s to position %s from temporary position. Field is now stranded in temporary position and must be corrected manually by an Admin') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype, movefieldfromposn, movefieldtoposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_SWAP_TWO_FIELDS_ON_SUBMISSION_PAGE_COULDNT_MOVE_FIELD1_TO_POSITION_FIELD2_FROM_TEMPORARY_POSITION', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldfromposn, movefieldtoposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - Field-order is now broken and must be corrected by Admin""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 5:
## failed to decrement the position of all fields below the field that was moved to a temp position
try:
            raise InvenioWebSubmitWarning(_('Unable to move field at position %s to position %s on page %s of submission %s%s - could not decrement the position of the fields below position %s. Tried to recover - please check that field ordering is not broken') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype, movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_MOVE_FIELD_TO_NEW_POSITION_ON_SUBMISSION_PAGE_COULDNT_DECREMENT_POSITION_OF_FIELDS_BELOW_FIELD1', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldfromposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - See Admin if field-order is broken""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
elif movefield_res == 6:
## failed to increment position of fields in and below position into which 'movefromfieldposn' is to be inserted
try:
raise InvenioWebSubmitWarning(_('Unable to move field at position %s to position %s on page %s of submission %s%s - could not increment the position of the fields at and below position %s. The field that was at position %s is now stranded in a temporary position.') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype, movefieldtoposn, movefieldfromposn))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_MOVE_FIELD_TO_NEW_POSITION_ON_SUBMISSION_PAGE_COULDNT_INCREMENT_POSITION_OF_FIELDS_AT_AND_BELOW_FIELD2', \
#movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype), movefieldtoposn, movefieldfromposn))
user_msg.append("""Unable to move field from position %s to position %s on page %s of submission %s%s - Field-order is now broken and must be corrected by Admin""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
else:
## successful update:
try:
raise InvenioWebSubmitWarning(_('Moved field from position %s to position %s on page %s of submission \'%s%s\'.') % (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_MOVED_FIELD_ON_SUBMISSION_PAGE', movefieldfromposn, movefieldtoposn, pagenum, "%s%s" % (action, doctype)))
user_msg.append("""Successfully moved field from position %s to position %s on page %s of submission %s%s""" \
% (movefieldfromposn, movefieldtoposn, pagenum, action, doctype))
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
return (title, body)
def _configure_doctype_delete_field_from_submissionpage(doctype, action, pagenum, fieldnum):
"""Delete a field from a submission page"""
_ = gettext_set_language(CFG_SITE_LANG)
user_msg = []
del_res = delete_a_field_from_submissionpage_then_reorder_fields_below_to_fill_vacant_position(doctype=doctype,
action=action,
pagenum=pagenum,
fieldposn=fieldnum)
if del_res == 1:
try:
            raise InvenioWebSubmitWarning(_('Unable to delete field at position %s from page %s of submission \'%s%s\'') % (fieldnum, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_UNABLE_TO_DELETE_FIELD_FROM_SUBMISSION_PAGE', fieldnum, pagenum, "%s%s" % (action, doctype)))
user_msg.append("Unable to delete field at position %s from page number %s of submission %s%s" % (fieldnum, pagenum, action, doctype))
else:
## deletion was OK
user_msg.append("Field deleted")
try:
            raise InvenioWebSubmitWarning(_('Deleted field at position %s from page %s of submission \'%s%s\'') % (fieldnum, pagenum, action, doctype))
except InvenioWebSubmitWarning, exc:
register_exception(stream='warning')
#warnings.append(exc.message)
#warnings.append(('WRN_WEBSUBMITADMIN_DELETED_FIELD_FROM_SUBMISSION_PAGE', fieldnum, pagenum, "%s%s" % (action, doctype)))
(title, body) = _create_configure_doctype_submission_page_elements_form(doctype=doctype, action=action, pagenum=pagenum, user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_page_elements_form(doctype, action, pagenum, movefieldfromposn="", user_msg=""):
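    """Create the page listing the elements (fields) found on a given submission page.
    @param doctype: (string) the unique ID of a document type
    @param action: (string) the unique ID of an action
    @param pagenum: (integer) the number of the submission page whose elements are to be listed
    @param movefieldfromposn: (string) the position of a field currently being moved (if any)
    @param user_msg: (string or list of strings) any message(s) to be displayed to the user
    @return: (tuple) containing 2 strings - (page-title, HTML page-body)
    """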
## get list of elements for page:
title = """Submission Elements found on Page %s of the "%s" Submission of the "%s" Document Type:"""\
% (pagenum, action, doctype)
body = ""
raw_page_elements = get_details_allsubmissionfields_on_submission_page(doctype=doctype, action=action, pagenum=pagenum)
## correctly stringify page elements for the template:
page_elements = []
for element in raw_page_elements:
page_elements.append(stringify_list_elements(element))
body = websubmitadmin_templates.tmpl_configuredoctype_list_submissionelements(doctype=doctype,
action=action,
pagenum=pagenum,
page_elements=page_elements,
movefieldfromposn=movefieldfromposn,
user_msg=user_msg)
return (title, body)
def perform_request_configure_doctype_submissionpages(doctype,
action,
pagenum="",
movepage="",
movepagedirection="",
deletepage="",
deletepageconfirm="",
addpage=""):
"""Process requests relating to the submission pages of a doctype/submission"""
body = ""
user_msg = []
try:
pagenum = int(pagenum)
except ValueError:
pagenum = ""
## ensure that there is only one doctype for this doctype ID - simply display all doctypes with warning if not
if doctype in ("", None):
user_msg.append("""Unknown Document Type""")
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
numrows_doctype = get_number_doctypes_docid(docid=doctype)
if numrows_doctype > 1:
## there are multiple doctypes with this doctype ID:
## TODO : LOG ERROR
user_msg.append("""Multiple document types identified by "%s" exist - cannot configure at this time.""" \
% (doctype,))
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
elif numrows_doctype == 0:
## this doctype does not seem to exist:
user_msg.append("""The document type identified by "%s" doesn't exist - cannot configure at this time.""" \
% (doctype,))
## TODO : LOG ERROR
all_doctypes = get_docid_docname_alldoctypes()
body = websubmitadmin_templates.tmpl_display_alldoctypes(doctypes=all_doctypes, user_msg=user_msg)
title = "Available WebSubmit Document Types"
return (title, body)
## ensure that this submission exists for this doctype:
numrows_submission = get_number_submissions_doctype_action(doctype=doctype, action=action)
if numrows_submission > 1:
## there are multiple submissions for this doctype/action ID:
## TODO : LOG ERROR
user_msg.append("""The Submission "%s" seems to exist multiple times for the Document Type "%s" - cannot configure at this time.""" \
% (action, doctype))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
elif numrows_submission == 0:
## this submission does not seem to exist for this doctype:
user_msg.append("""The Submission "%s" doesn't exist for the "%s" Document Type - cannot configure at this time.""" \
% (action, doctype))
## TODO : LOG ERROR
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
## submission valid
if addpage != "":
## add a new page to a submission:
error_code = add_submission_page_doctype_action(doctype=doctype, action=action)
if error_code == 0:
## success
user_msg.append("""A new Submission Page has been added into the last position""")
else:
            ## could not add it
user_msg.append("""Unable to add a new Submission Page""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif movepage != "":
        ## user wants to move a page up or down in the order
(title, body) = _configure_doctype_move_submission_page(doctype=doctype,
action=action, pagenum=pagenum, direction=movepagedirection)
elif deletepage != "":
## user wants to delete a page:
if deletepageconfirm != "":
## confirmation of deletion has been provided - proceed
(title, body) = _configure_doctype_delete_submission_page(doctype=doctype,
action=action, pagenum=pagenum)
else:
## user has not yet confirmed the deletion of a page - prompt for confirmation
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
deletepagenum=pagenum)
else:
## default - display details of submission pages for this submission:
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action)
return (title, body)
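## Example (hedged): a rough sketch of how the entry point above might be called
## from the WebSubmit admin web layer. The doctype/action values ("DEMOART",
## "SBI") and the button label are assumptions for illustration only.
##
##   (title, body) = perform_request_configure_doctype_submissionpages(
##       doctype="DEMOART", action="SBI", pagenum="2",
##       movepage="Move Page", movepagedirection="down")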
def _configure_doctype_move_submission_page(doctype, action, pagenum, direction):
user_msg = []
## Sanity checking:
if direction.lower() not in ("up", "down"):
## invalid direction:
user_msg.append("""Invalid Page destination - no action was taken""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
return (title, body)
## swap the pages:
if direction.lower() == "up":
error_code = swap_elements_adjacent_pages_doctype_action(doctype=doctype, action=action,
page1=pagenum, page2=pagenum-1)
else:
error_code = swap_elements_adjacent_pages_doctype_action(doctype=doctype, action=action,
page1=pagenum, page2=pagenum+1)
if error_code == 0:
## pages swapped successfully:
## TODO : LOG PAGE SWAP
user_msg.append("""Page %s was successfully moved %swards""" % (pagenum, direction.capitalize()))
elif error_code == 1:
## pages are not adjacent:
user_msg.append("""Unable to move page - only adjacent pages can be swapped around""")
elif error_code == 2:
        ## at least one page out of legal range (e.g. trying to move a page to a position higher or lower
        ## than the number of pages):
user_msg.append("""Unable to move page to illegal position""")
elif error_code in (3, 4):
## Some sort of problem moving fields around!
## TODO : LOG ERROR
user_msg.append("""Error: there was a problem swapping the submission elements to their new pages.""")
user_msg.append("""An attempt was made to return the elements to their original pages - you """\
"""should verify that this was successful, or ask your administrator"""\
""" to fix the problem manually""")
elif error_code == 5:
## the elements from the first page were left stranded in the temporary page!
## TODO : LOG ERROR
user_msg.append("""Error: there was a problem swapping the submission elements to their new pages.""")
user_msg.append("""Some elements were left stranded on a temporary page. Please ask your administrator to"""\
""" fix this problem manually""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype, action=action, user_msg=user_msg)
return (title, body)
def _configure_doctype_delete_submission_page(doctype, action, pagenum):
user_msg = []
num_pages = get_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
if num_pages > 0:
## proceed with deletion
error_code = delete_allfields_submissionpage_doctype_action(doctype=doctype, action=action, pagenum=pagenum)
if error_code == 0:
## everything OK
## move elements from pages above the deleted page down by one page:
decrement_by_one_pagenumber_submissionelements_abovepage(doctype=doctype, action=action, frompage=pagenum)
## now decrement the number of pages associated with the submission:
error_code = decrement_by_one_number_submissionpages_doctype_action(doctype=doctype, action=action)
if error_code == 0:
## successfully deleted submission page
## TODO : LOG DELETION
user_msg.append("""Page number %s of Submission %s was successfully deleted."""\
% (pagenum, action))
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
else:
## error - either submission didn't exist, or multiple instances found
## TODO : LOG ERROR
user_msg.append("""The Submission elements were deleted from Page %s of the Submission "%s"."""\
""" However, it was not possible to delete the page itself."""\
% (pagenum, action))
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
else:
## unable to delete some or all fields from the page
## TODO : LOG ERROR
user_msg.append("""Error: Unable to delete some field elements from Page %s of Submission %s%s - """\
"""Page not deleted!""" % (pagenum, action, doctype))
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
elif num_pages == 0:
## no pages to delete for this submission
user_msg.append("""This Submission has no Pages - Cannot delete a Page!""")
(title, body) = _create_configure_doctype_submission_pages_form(doctype=doctype,
action=action,
user_msg=user_msg)
else:
## error - couldn't determine the number of pages for submission
## TODO : LOG ERROR
user_msg.append("""Unable to determine number of Submission Pages for Submission "%s" - """\
"""Cannot delete page %s"""\
% (action, pagenum))
(title, body) = _create_configure_doctype_form(doctype=doctype, user_msg=user_msg)
return (title, body)
def _create_configure_doctype_submission_pages_form(doctype,
action,
deletepagenum="",
user_msg=""):
"""Perform the necessary steps in order to display a list of the pages belonging to a given
submission of a given document type.
@param doctype: (string) the unique ID of the document type.
@param action: (string) the unique name/ID of the action.
@param user_msg: (string, or list) any message(s) to be displayed to the user.
@return: a tuple containing 2 strings - the page title and the page body.
"""
title = """Details of the Pages of the "%s" Submission of the "%s" Document Type:""" % (action, doctype)
submission_dets = get_cd_md_numbersubmissionpages_doctype_action(doctype=doctype, action=action)
if len(submission_dets) > 0:
cd = str(submission_dets[0][0])
md = str(submission_dets[0][1])
num_pages = submission_dets[0][2]
else:
(cd, md, num_pages) = ("", "", "0")
body = websubmitadmin_templates.tmpl_configuredoctype_list_submissionpages(doctype=doctype,
action=action,
number_pages=num_pages,
cd=cd,
md=md,
deletepagenum=deletepagenum,
user_msg=user_msg)
return (title, body)
| gpl-2.0 |
darkleons/odoo | addons/mrp_repair/mrp_repair.py | 4 | 36646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from datetime import datetime
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class mrp_repair(osv.osv):
_name = 'mrp.repair'
_inherit = 'mail.thread'
_description = 'Repair Order'
def _amount_untaxed(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates untaxed amount.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param field_name: Name of field.
@param arg: Argument
@param context: A standard dictionary for contextual values
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = 0.0
for line in repair.operations:
res[repair.id] += line.price_subtotal
for line in repair.fees_lines:
res[repair.id] += line.price_subtotal
cur = repair.pricelist_id.currency_id
res[repair.id] = cur_obj.round(cr, uid, cur, res[repair.id])
return res
def _amount_tax(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates taxed amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
#return {}.fromkeys(ids, 0)
cur_obj = self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for repair in self.browse(cr, uid, ids, context=context):
val = 0.0
cur = repair.pricelist_id.currency_id
for line in repair.operations:
#manage prices with tax included use compute_all instead of compute
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
for line in repair.fees_lines:
if line.to_invoice:
tax_calculate = tax_obj.compute_all(cr, uid, line.tax_id, line.price_unit, line.product_uom_qty, line.product_id, repair.partner_id)
for c in tax_calculate['taxes']:
val += c['amount']
res[repair.id] = cur_obj.round(cr, uid, cur, val)
return res
def _amount_total(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates total amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
untax = self._amount_untaxed(cr, uid, ids, field_name, arg, context=context)
tax = self._amount_tax(cr, uid, ids, field_name, arg, context=context)
cur_obj = self.pool.get('res.currency')
for id in ids:
repair = self.browse(cr, uid, id, context=context)
cur = repair.pricelist_id.currency_id
res[id] = cur_obj.round(cr, uid, cur, untax.get(id, 0.0) + tax.get(id, 0.0))
return res
def _get_default_address(self, cr, uid, ids, field_name, arg, context=None):
res = {}
partner_obj = self.pool.get('res.partner')
for data in self.browse(cr, uid, ids, context=context):
adr_id = False
if data.partner_id:
adr_id = partner_obj.address_get(cr, uid, [data.partner_id.id], ['default'])['default']
res[data.id] = adr_id
return res
def _get_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('operations', 'in', ids)], context=context)
def _get_fee_lines(self, cr, uid, ids, context=None):
return self.pool['mrp.repair'].search(cr, uid, [('fees_lines', 'in', ids)], context=context)
_columns = {
'name': fields.char('Repair Reference', required=True, states={'confirmed': [('readonly', True)]}, copy=False),
'product_id': fields.many2one('product.product', string='Product to Repair', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'partner_id': fields.many2one('res.partner', 'Partner', select=True, help='Choose partner for whom the order will be invoiced and delivered.', states={'confirmed': [('readonly', True)]}),
'address_id': fields.many2one('res.partner', 'Delivery Address', domain="[('parent_id','=',partner_id)]", states={'confirmed': [('readonly', True)]}),
'default_address_id': fields.function(_get_default_address, type="many2one", relation="res.partner"),
'state': fields.selection([
('draft', 'Quotation'),
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('under_repair', 'Under Repair'),
('ready', 'Ready to Repair'),
('2binvoiced', 'To be Invoiced'),
('invoice_except', 'Invoice Exception'),
('done', 'Repaired')
], 'Status', readonly=True, track_visibility='onchange', copy=False,
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order. \
\n* The \'Confirmed\' status is used when a user confirms the repair order. \
\n* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed. \
\n* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done. \
\n* The \'Done\' status is set when repairing is completed.\
\n* The \'Cancelled\' status is used when user cancel repair order.'),
'location_id': fields.many2one('stock.location', 'Current Location', select=True, required=True, readonly=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'location_dest_id': fields.many2one('stock.location', 'Delivery Location', readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}),
'lot_id': fields.many2one('stock.production.lot', 'Repaired Lot', domain="[('product_id','=', product_id)]", help="Products repaired are all belonging to this lot"),
'guarantee_limit': fields.date('Warranty Expiration', help="The warranty expiration limit is computed as: last move date + warranty defined on selected product. If the current date is below the warranty expiration limit, each operation and fee you will add will be set as 'not to invoiced' by default. Note that you can change manually afterwards.", states={'confirmed': [('readonly', True)]}),
'operations': fields.one2many('mrp.repair.line', 'repair_id', 'Operation Lines', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', help='Pricelist of the selected partner.'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoicing Address'),
'invoice_method': fields.selection([
("none", "No Invoice"),
("b4repair", "Before Repair"),
("after_repair", "After Repair")
], "Invoice Method",
select=True, required=True, states={'draft': [('readonly', False)]}, readonly=True, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.'),
'invoice_id': fields.many2one('account.invoice', 'Invoice', readonly=True, track_visibility="onchange", copy=False),
'move_id': fields.many2one('stock.move', 'Move', readonly=True, help="Move created by the repair order", track_visibility="onchange", copy=False),
'fees_lines': fields.one2many('mrp.repair.fee', 'repair_id', 'Fees', readonly=True, states={'draft': [('readonly', False)]}, copy=True),
'internal_notes': fields.text('Internal Notes'),
'quotation_notes': fields.text('Quotation Notes'),
'company_id': fields.many2one('res.company', 'Company'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'repaired': fields.boolean('Repaired', readonly=True, copy=False),
'amount_untaxed': fields.function(_amount_untaxed, string='Untaxed Amount',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_tax': fields.function(_amount_tax, string='Taxes',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
'amount_total': fields.function(_amount_total, string='Total',
store={
'mrp.repair': (lambda self, cr, uid, ids, c={}: ids, ['operations', 'fees_lines'], 10),
'mrp.repair.line': (_get_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
'mrp.repair.fee': (_get_fee_lines, ['price_unit', 'price_subtotal', 'product_id', 'tax_id', 'product_uom_qty', 'product_uom'], 10),
}),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'state': lambda *a: 'draft',
'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'mrp.repair'),
'invoice_method': lambda *a: 'none',
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.repair', context=context),
'pricelist_id': lambda self, cr, uid, context: self.pool.get('product.pricelist').search(cr, uid, [('type', '=', 'sale')])[0],
'product_qty': 1.0,
'location_id': _default_stock_location,
}
_sql_constraints = [
('name', 'unique (name)', 'The name of the Repair Order must be unique!'),
]
def onchange_product_id(self, cr, uid, ids, product_id=None):
""" On change of product sets some values.
@param product_id: Changed product
@return: Dictionary of values.
"""
product = False
if product_id:
product = self.pool.get("product.product").browse(cr, uid, product_id)
return {'value': {
'guarantee_limit': False,
'lot_id': False,
'product_uom': product and product.uom_id.id or False,
}
}
def onchange_product_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value': {}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
def onchange_location_id(self, cr, uid, ids, location_id=None):
""" On change of location
"""
return {'value': {'location_dest_id': location_id}}
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_partner_id(self, cr, uid, ids, part, address_id):
""" On change of partner sets the values of partner address,
partner invoice address and pricelist.
@param part: Changed id of partner.
@param address_id: Address id from current record.
@return: Dictionary of values.
"""
part_obj = self.pool.get('res.partner')
pricelist_obj = self.pool.get('product.pricelist')
if not part:
return {'value': {
'address_id': False,
'partner_invoice_id': False,
'pricelist_id': pricelist_obj.search(cr, uid, [('type', '=', 'sale')])[0]
}
}
addr = part_obj.address_get(cr, uid, [part], ['delivery', 'invoice', 'default'])
partner = part_obj.browse(cr, uid, part)
pricelist = partner.property_product_pricelist and partner.property_product_pricelist.id or False
return {'value': {
'address_id': addr['delivery'] or addr['default'],
'partner_invoice_id': addr['invoice'],
'pricelist_id': pricelist
}
}
def action_cancel_draft(self, cr, uid, ids, *args):
""" Cancels repair order when it is in 'Draft' state.
@param *arg: Arguments
@return: True
"""
if not len(ids):
return False
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids):
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'draft'})
self.write(cr, uid, ids, {'state': 'draft'})
return self.create_workflow(cr, uid, ids)
def action_confirm(self, cr, uid, ids, *args):
""" Repair order state is set to 'To be invoiced' when invoice method
is 'Before repair' else state becomes 'Confirmed'.
@param *arg: Arguments
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for o in self.browse(cr, uid, ids):
if (o.invoice_method == 'b4repair'):
self.write(cr, uid, [o.id], {'state': '2binvoiced'})
else:
self.write(cr, uid, [o.id], {'state': 'confirmed'})
for line in o.operations:
                    if line.product_id.track_production and not line.lot_id:
raise osv.except_osv(_('Warning!'), _("Serial number is required for operation line with product '%s'") % (line.product_id.name))
mrp_line_obj.write(cr, uid, [l.id for l in o.operations], {'state': 'confirmed'})
return True
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels repair order.
@return: True
"""
mrp_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
if not repair.invoiced:
mrp_line_obj.write(cr, uid, [l.id for l in repair.operations], {'state': 'cancel'}, context=context)
else:
raise osv.except_osv(_('Warning!'), _('Repair order is already invoiced.'))
return self.write(cr, uid, ids, {'state': 'cancel'})
def wkf_invoice_create(self, cr, uid, ids, *args):
self.action_invoice_create(cr, uid, ids)
return True
def action_invoice_create(self, cr, uid, ids, group=False, context=None):
""" Creates invoice(s) for repair order.
@param group: It is set to true when group invoice is to be generated.
@return: Invoice Ids.
"""
res = {}
invoices_group = {}
inv_line_obj = self.pool.get('account.invoice.line')
inv_obj = self.pool.get('account.invoice')
repair_line_obj = self.pool.get('mrp.repair.line')
repair_fee_obj = self.pool.get('mrp.repair.fee')
for repair in self.browse(cr, uid, ids, context=context):
res[repair.id] = False
if repair.state in ('draft', 'cancel') or repair.invoice_id:
continue
if not (repair.partner_id.id and repair.partner_invoice_id.id):
raise osv.except_osv(_('No partner!'), _('You have to select a Partner Invoice Address in the repair form!'))
comment = repair.quotation_notes
if (repair.invoice_method != 'none'):
if group and repair.partner_invoice_id.id in invoices_group:
inv_id = invoices_group[repair.partner_invoice_id.id]
invoice = inv_obj.browse(cr, uid, inv_id)
invoice_vals = {
'name': invoice.name + ', ' + repair.name,
'origin': invoice.origin + ', ' + repair.name,
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
}
inv_obj.write(cr, uid, [inv_id], invoice_vals, context=context)
else:
if not repair.partner_id.property_account_receivable:
raise osv.except_osv(_('Error!'), _('No account defined for partner "%s".') % repair.partner_id.name)
account_id = repair.partner_id.property_account_receivable.id
inv = {
'name': repair.name,
'origin': repair.name,
'type': 'out_invoice',
'account_id': account_id,
'partner_id': repair.partner_invoice_id.id or repair.partner_id.id,
'currency_id': repair.pricelist_id.currency_id.id,
'comment': repair.quotation_notes,
'fiscal_position': repair.partner_id.property_account_position.id
}
inv_id = inv_obj.create(cr, uid, inv)
invoices_group[repair.partner_invoice_id.id] = inv_id
self.write(cr, uid, repair.id, {'invoiced': True, 'invoice_id': inv_id})
for operation in repair.operations:
if operation.to_invoice:
if group:
name = repair.name + '-' + operation.name
else:
name = operation.name
if operation.product_id.property_account_income:
account_id = operation.product_id.property_account_income.id
elif operation.product_id.categ_id.property_account_income_categ:
account_id = operation.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % operation.product_id.name)
invoice_line_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': operation.product_uom_qty,
'invoice_line_tax_id': [(6, 0, [x.id for x in operation.tax_id])],
'uos_id': operation.product_uom.id,
'price_unit': operation.price_unit,
'price_subtotal': operation.product_uom_qty * operation.price_unit,
'product_id': operation.product_id and operation.product_id.id or False
})
repair_line_obj.write(cr, uid, [operation.id], {'invoiced': True, 'invoice_line_id': invoice_line_id})
for fee in repair.fees_lines:
if fee.to_invoice:
if group:
name = repair.name + '-' + fee.name
else:
name = fee.name
if not fee.product_id:
raise osv.except_osv(_('Warning!'), _('No product defined on Fees!'))
if fee.product_id.property_account_income:
account_id = fee.product_id.property_account_income.id
elif fee.product_id.categ_id.property_account_income_categ:
account_id = fee.product_id.categ_id.property_account_income_categ.id
else:
raise osv.except_osv(_('Error!'), _('No account defined for product "%s".') % fee.product_id.name)
invoice_fee_id = inv_line_obj.create(cr, uid, {
'invoice_id': inv_id,
'name': name,
'origin': repair.name,
'account_id': account_id,
'quantity': fee.product_uom_qty,
'invoice_line_tax_id': [(6, 0, [x.id for x in fee.tax_id])],
'uos_id': fee.product_uom.id,
'product_id': fee.product_id and fee.product_id.id or False,
'price_unit': fee.price_unit,
'price_subtotal': fee.product_uom_qty * fee.price_unit
})
repair_fee_obj.write(cr, uid, [fee.id], {'invoiced': True, 'invoice_line_id': invoice_fee_id})
res[repair.id] = inv_id
return res
def action_repair_ready(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Ready'
@return: True
"""
for repair in self.browse(cr, uid, ids, context=context):
self.pool.get('mrp.repair.line').write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
self.write(cr, uid, [repair.id], {'state': 'ready'})
return True
def action_repair_start(self, cr, uid, ids, context=None):
""" Writes repair order state to 'Under Repair'
@return: True
"""
repair_line = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
repair_line.write(cr, uid, [l.id for
l in repair.operations], {'state': 'confirmed'}, context=context)
repair.write({'state': 'under_repair'})
return True
def action_repair_end(self, cr, uid, ids, context=None):
""" Writes repair order state to 'To be invoiced' if invoice method is
After repair else state is set to 'Ready'.
@return: True
"""
for order in self.browse(cr, uid, ids, context=context):
val = {}
val['repaired'] = True
if (not order.invoiced and order.invoice_method == 'after_repair'):
val['state'] = '2binvoiced'
elif (not order.invoiced and order.invoice_method == 'b4repair'):
val['state'] = 'ready'
else:
pass
self.write(cr, uid, [order.id], val)
return True
def wkf_repair_done(self, cr, uid, ids, *args):
self.action_repair_done(cr, uid, ids)
return True
def action_repair_done(self, cr, uid, ids, context=None):
""" Creates stock move for operation and stock move for final product of repair order.
@return: Move ids of final products
"""
res = {}
move_obj = self.pool.get('stock.move')
repair_line_obj = self.pool.get('mrp.repair.line')
for repair in self.browse(cr, uid, ids, context=context):
move_ids = []
for move in repair.operations:
move_id = move_obj.create(cr, uid, {
'name': move.name,
'product_id': move.product_id.id,
'restrict_lot_id': move.lot_id.id,
'product_uom_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'partner_id': repair.address_id and repair.address_id.id or False,
'location_id': move.location_id.id,
'location_dest_id': move.location_dest_id.id,
'state': 'assigned',
})
move_ids.append(move_id)
repair_line_obj.write(cr, uid, [move.id], {'move_id': move_id, 'state': 'done'}, context=context)
move_id = move_obj.create(cr, uid, {
'name': repair.name,
'product_id': repair.product_id.id,
'product_uom': repair.product_uom.id or repair.product_id.uom_id.id,
'product_uom_qty': repair.product_qty,
'partner_id': repair.address_id and repair.address_id.id or False,
'location_id': repair.location_id.id,
'location_dest_id': repair.location_dest_id.id,
'restrict_lot_id': repair.lot_id.id,
})
move_ids.append(move_id)
move_obj.action_done(cr, uid, move_ids, context=context)
self.write(cr, uid, [repair.id], {'state': 'done', 'move_id': move_id}, context=context)
res[repair.id] = move_id
return res
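# Example (hedged): a minimal sketch of driving mrp.repair through the old-style
# ORM API from server-side code; the field values are placeholders for
# illustration, and in practice the form-view buttons/workflow call these methods.
#
#   repair_obj = self.pool.get('mrp.repair')
#   repair_id = repair_obj.create(cr, uid, {
#       'product_id': product_id,
#       'product_uom': uom_id,
#       'partner_id': partner_id,
#       'location_id': location_id,
#       'location_dest_id': location_id,
#   }, context=context)
#   repair_obj.action_confirm(cr, uid, [repair_id])
#   repair_obj.action_repair_start(cr, uid, [repair_id], context=context)
#   repair_obj.action_repair_end(cr, uid, [repair_id], context=context)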
class ProductChangeMixin(object):
def product_id_change(self, cr, uid, ids, pricelist, product, uom=False,
product_uom_qty=0, partner_id=False, guarantee_limit=False):
""" On change of product it sets product quantity, tax account, name,
uom of product, unit price and price subtotal.
@param pricelist: Pricelist of current record.
@param product: Changed id of product.
@param uom: UoM of current record.
@param product_uom_qty: Quantity of current record.
@param partner_id: Partner of current record.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values and warning message.
"""
result = {}
warning = {}
if not product_uom_qty:
product_uom_qty = 1
result['product_uom_qty'] = product_uom_qty
if product:
product_obj = self.pool.get('product.product').browse(cr, uid, product)
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, partner.property_account_position, product_obj.taxes_id)
result['name'] = product_obj.display_name
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id or False
if not pricelist:
warning = {
'title': _('No Pricelist!'),
'message':
_('You have to select a pricelist in the Repair form !\n'
'Please set one before choosing a product.')
}
else:
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, product_uom_qty, partner_id, {'uom': uom})[pricelist]
if price is False:
warning = {
'title': _('No valid pricelist line found !'),
'message':
_("Couldn't find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
}
else:
result.update({'price_unit': price, 'price_subtotal': price * product_uom_qty})
return {'value': result, 'warning': warning}
class mrp_repair_line(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.line'
_description = 'Repair Line'
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.to_invoice and line.price_unit * line.product_uom_qty or 0
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
_columns = {
'name': fields.char('Description', required=True),
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', ondelete='cascade', select=True),
'type': fields.selection([('add', 'Add'), ('remove', 'Remove')], 'Type', required=True),
'to_invoice': fields.boolean('To Invoice'),
'product_id': fields.many2one('product.product', 'Product', required=True),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'price_unit': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', required=True, select=True),
'move_id': fields.many2one('stock.move', 'Inventory Move', readonly=True, copy=False),
'lot_id': fields.many2one('stock.production.lot', 'Lot'),
'state': fields.selection([
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('done', 'Done'),
('cancel', 'Cancelled')], 'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically as draft when repair order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when repair order in confirm status. \
\n* The \'Done\' status is set automatically when repair order is completed.\
\n* The \'Cancelled\' status is set automatically when user cancel repair order.'),
}
_defaults = {
'state': lambda *a: 'draft',
'product_uom_qty': lambda *a: 1,
}
def onchange_operation_type(self, cr, uid, ids, type, guarantee_limit, company_id=False, context=None):
""" On change of operation type it sets source location, destination location
and to invoice field.
@param product: Changed operation type.
@param guarantee_limit: Guarantee limit of current record.
@return: Dictionary of values.
"""
if not type:
return {'value': {
'location_id': False,
'location_dest_id': False
}}
location_obj = self.pool.get('stock.location')
warehouse_obj = self.pool.get('stock.warehouse')
location_id = location_obj.search(cr, uid, [('usage', '=', 'production')], context=context)
location_id = location_id and location_id[0] or False
if type == 'add':
# TOCHECK: Find stock location for user's company warehouse or
# repair order's company's warehouse (company_id field is added in fix of lp:831583)
args = company_id and [('company_id', '=', company_id)] or []
warehouse_ids = warehouse_obj.search(cr, uid, args, context=context)
stock_id = False
if warehouse_ids:
stock_id = warehouse_obj.browse(cr, uid, warehouse_ids[0], context=context).lot_stock_id.id
to_invoice = (guarantee_limit and datetime.strptime(guarantee_limit, '%Y-%m-%d') < datetime.now())
return {'value': {
'to_invoice': to_invoice,
'location_id': stock_id,
'location_dest_id': location_id
}}
scrap_location_ids = location_obj.search(cr, uid, [('scrap_location', '=', True)], context=context)
return {'value': {
'to_invoice': False,
'location_id': location_id,
'location_dest_id': scrap_location_ids and scrap_location_ids[0] or False,
}}
class mrp_repair_fee(osv.osv, ProductChangeMixin):
_name = 'mrp.repair.fee'
_description = 'Repair Fees Line'
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
""" Calculates amount.
@param field_name: Name of field.
@param arg: Argument
@return: Dictionary of values.
"""
res = {}
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.to_invoice and line.price_unit * line.product_uom_qty or 0
cur = line.repair_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
_columns = {
'repair_id': fields.many2one('mrp.repair', 'Repair Order Reference', required=True, ondelete='cascade', select=True),
'name': fields.char('Description', select=True, required=True),
'product_id': fields.many2one('product.product', 'Product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'price_unit': fields.float('Unit Price', required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),
'tax_id': fields.many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes'),
'invoice_line_id': fields.many2one('account.invoice.line', 'Invoice Line', readonly=True, copy=False),
'to_invoice': fields.boolean('To Invoice'),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
}
_defaults = {
'to_invoice': lambda *a: True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
repotvsupertuga/repo | plugin.video.jami/resources/lib/resolvers/fastvideo.py | 23 | 1471 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from resources.lib.libraries import client
from resources.lib.libraries import jsunpack
def resolve(url):
try:
url = url.replace('/embed-', '/')
url = re.compile('//.+?/([\w]+)').findall(url)[0]
url = 'http://rapidvideo.ws/embed-%s.html' % url
result = client.request(url, mobile=True)
result = re.compile('(eval.*?\)\)\))').findall(result)[-1]
result = jsunpack.unpack(result)
url = client.parseDOM(result, 'embed', ret='src')
url += re.compile("file *: *[\'|\"](.+?)[\'|\"]").findall(result)
url = [i for i in url if not i.endswith('.srt')]
url = 'http://' + url[0].split('://', 1)[-1]
return url
except:
return
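# Example (hedged): rough usage sketch only; the embed URL below is made up and
# the host may no longer serve streams this way.
#
#   stream_url = resolve('http://rapidvideo.ws/embed-abc123xyz.html')
#   if stream_url:
#       pass  # hand the direct stream URL to the Kodi player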
| gpl-2.0 |
MRigal/django | django/contrib/gis/admin/options.py | 379 | 5649 | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.db import models
from django.contrib.gis.gdal import HAS_GDAL, OGRGeomType
from django.core.exceptions import ImproperlyConfigured
spherical_mercator_srid = 3857
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.13/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
wms_options = {'format': 'image/jpeg'}
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing 2D GeometryFields (OpenLayers 2 does not support
3D editing).
"""
if isinstance(db_field, models.GeometryField) and db_field.dim < 3:
kwargs.pop('request', None)
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION':
collection_type = 'Any'
else:
collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', %s' % ', '.join(wms_options)
params = {'default_lon': self.default_lon,
'default_lat': self.default_lat,
'default_zoom': self.default_zoom,
'display_wkt': self.debug or self.display_wkt,
'geom_type': OGRGeomType(db_field.geom_type),
'field_name': db_field.name,
'is_collection': is_collection,
'scrollable': self.scrollable,
'layerswitcher': self.layerswitcher,
'collection_type': collection_type,
'is_generic': db_field.geom_type == 'GEOMETRY',
'is_linestring': db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon': db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point': db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom': self.num_zoom,
'max_zoom': self.max_zoom,
'min_zoom': self.min_zoom,
'units': self.units, # likely should get from object
'max_resolution': self.max_resolution,
'max_extent': self.max_extent,
'modifiable': self.modifiable,
'mouse_position': self.mouse_position,
'scale_text': self.scale_text,
'map_width': self.map_width,
'map_height': self.map_height,
'point_zoom': self.point_zoom,
'srid': self.map_srid,
'display_srid': self.display_srid,
'wms_url': self.wms_url,
'wms_layer': self.wms_layer,
'wms_name': self.wms_name,
'wms_options': wms_options,
'debug': self.debug,
}
return OLMap
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
def __init__(self, *args):
if not HAS_GDAL:
raise ImproperlyConfigured("OSMGeoAdmin is not usable without GDAL libs installed")
super(OSMGeoAdmin, self).__init__(*args)
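# Example (hedged): a minimal sketch of using OSMGeoAdmin for a hypothetical
# geographic model; "WorldBorder" and "myapp" are assumptions, not part of this
# module.
#
#   from django.contrib.gis import admin
#   from myapp.models import WorldBorder
#
#   class WorldBorderAdmin(admin.OSMGeoAdmin):
#       default_zoom = 3
#       map_width = 700
#
#   admin.site.register(WorldBorder, WorldBorderAdmin)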
| bsd-3-clause |
thaihungle/deepexp | kbtext-mann/get_stats.py | 3 | 2790 | import json
import util
from collections import defaultdict
def get_fb_stats(freebase_data_file):
with open(freebase_data_file) as fb:
fact_counter = 0
relation_set = set()
entity_set = set()
for line in fb:
line = line.strip()
line = line[1:-1]
e1, r1, r2, e2 = [a.strip('"') for a in [x.strip() for x in line.split(',')]]
r = r1 + '_' + r2
fact_counter += 1
relation_set.add(r)
entity_set.add(e1)
entity_set.add(e2)
print("Total num of facts {}".format(fact_counter))
print("Num unique entities {}".format(len(entity_set)))
print("Num unique relations {}".format(len(relation_set)))
def get_questions_stats(train_data_file, dev_data_file):
print('1. Getting the number of blanks')
blank_str = '_blank_'
num_blanks_map = defaultdict(int)
word_freq_train = defaultdict(int)
with open(train_data_file) as train_file:
for counter, line in enumerate(util.verboserate(train_file)):
line = line.strip()
q_json = json.loads(line)
q = q_json['sentence']
count = q.count(blank_str)
num_blanks_map[count] += 1
words = q.split(' ')
for word in words:
word = word.strip()
word_freq_train[word] += 1
a_list = q_json['answerSubset']
for a in a_list:
                word_freq_train[a] += 1
print(num_blanks_map)
    print('2. Number of word types in the train set {}'.format(len(word_freq_train)))
    print('3. Checking overlap with the dev answers')
dev_answers_present = set()
dev_answers_oov = set()
dev_answers = set()
with open(dev_data_file) as dev_file:
for line in dev_file:
line = line.strip()
dev_json = json.loads(line)
a_list = dev_json['answerSubset']
for a in a_list:
if a in word_freq_train:
dev_answers_present.add(a)
else:
dev_answers_oov.add(a)
dev_answers.add(a)
    print('Number of unique dev answer strings {}'.format(len(dev_answers)))
    print('Number of oov answer strings in dev set {}'.format(len(dev_answers_oov)))
    print('Number of dev answer strings which have at least 1 occurrence in train set {}'.format(
        len(dev_answers_present)))
freebase_data_file = "/home/rajarshi/research/graph-parser/data/spades/freebase.spades.txt"
train_data_file = "/home/rajarshi/research/graph-parser/data/spades/train.json"
dev_data_file = "/home/rajarshi/research/graph-parser/data/spades/dev.json"
# get_fb_stats()
get_questions_stats(train_data_file, dev_data_file)
| mit |
sunzhongwei/pelican | pelican/readers.py | 4 | 21032 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os
import re
import docutils
import docutils.core
import docutils.io
from docutils.writers.html4css1 import HTMLTranslator
import six
# import the directives to have pygments support
from pelican import rstdirectives # NOQA
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
try:
from html import escape
except ImportError:
from cgi import escape
from six.moves.html_parser import HTMLParser
from pelican import signals
from pelican.contents import Page, Category, Tag, Author
from pelican.utils import get_date, pelican_open, FileStampDataCacher, SafeDatetime
METADATA_PROCESSORS = {
'tags': lambda x, y: [Tag(tag, y) for tag in x.split(',')],
'date': lambda x, y: get_date(x),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip(),
'category': Category,
'author': Author,
'authors': lambda x, y: [Author(author.strip(), y) for author in x.split(',')],
}
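# Example (hedged): a rough illustration of how these processors are applied.
# BaseReader.process_metadata() below dispatches on the metadata key, so with a
# fully populated Pelican settings dict a raw 'tags' string becomes a list of
# Tag objects and a raw 'date' string is parsed into a datetime by get_date().
#
#   reader = BaseReader(settings)                         # full settings dict assumed
#   reader.process_metadata('tags', 'pelican, python')    # -> [Tag, Tag]
#   reader.process_metadata('date', '2015-06-01 10:00')   # -> SafeDatetime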
logger = logging.getLogger(__name__)
class BaseReader(object):
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
HTMLTranslator.__init__(self, document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node):
visitor = _FieldBodyTranslator(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files"""
enabled = bool(docutils)
file_extensions = ['rst']
class FileInput(docutils.io.FileInput):
"""Patch docutils.io.FileInput to remove "U" mode in py3.
Universal newlines is enabled by default and "U" mode is deprecated
in py3.
"""
def __init__(self, *args, **kwargs):
if six.PY3:
kwargs['mode'] = kwargs.get('mode', 'r').replace('U', '')
docutils.io.FileInput.__init__(self, *args, **kwargs)
def __init__(self, *args, **kwargs):
super(RstReader, self).__init__(*args, **kwargs)
def _parse_metadata(self, document):
"""Return the dict containing document metadata"""
output = {}
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name == 'summary':
value = render_node_to_html(document, body_elem)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
value = ','.join(value) # METADATA_PROCESSORS expects a string
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'exit_status_level': 2,
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
source_class=self.FileInput,
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.writer.translator_class = PelicanHTMLTranslator
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish(enable_exit_status=True)
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document)
metadata.setdefault('title', parts.get('title'))
return content, metadata
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super(MarkdownReader, self).__init__(*args, **kwargs)
self.extensions = list(self.settings['MD_EXTENSIONS'])
if 'meta' not in self.extensions:
self.extensions.append('meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
output = {}
for name, value in meta.items():
name = name.lower()
if name == "summary":
# handle summary metadata as markdown
# summary metadata is special case and join all list values
summary_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
summary = self._md.convert(summary_values)
output[name] = self.process_metadata(name, summary)
elif name in METADATA_PROCESSORS:
if len(value) > 1:
logger.warning('Duplicate definition of `%s` '
'for %s. Using first one.', name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(extensions=self.extensions)
with pelican_open(source_path) as text:
content = self._md.convert(text)
metadata = self._parse_metadata(self._md.Meta)
return content, metadata
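# Example (hedged): a minimal sketch of using MarkdownReader directly, assuming
# a settings dict providing 'MD_EXTENSIONS' and a Markdown file with a metadata
# header; in normal operation Readers.read_file() below drives this class.
#
#   settings = {'MD_EXTENSIONS': ['extra']}   # assumed minimal settings
#   reader = MarkdownReader(settings)
#   content, metadata = reader.read('content/first-post.md')
#   # content -> rendered HTML, metadata -> dict of processed header fields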
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
try:
# Python 3.4+
HTMLParser.__init__(self, convert_charrefs=False)
except TypeError:
HTMLParser.__init__(self)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape(tag))
for k, v in attrs:
result += ' ' + escape(k)
if v is not None:
result += '="{}"'.format(escape(v))
if close_tag:
return result + ' />'
return result + '>'
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_serialized = ', '.join(['{}="{}"'.format(k, v) for k, v in attrs])
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': ("Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'")})
if name == 'keywords':
name = 'tags'
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
This is customizable both with the 'READERS' setting, and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super(Readers, self).__init__(settings, cache_name,
caching_policy, load_policy,
)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = os.path.relpath(path, base_path)
logger.debug('Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
if preread_signal:
logger.debug('Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = default_metadata(
settings=self.settings, process=reader.process_metadata)
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
self.cache_data(path, (content, reader_metadata))
metadata.update(reader_metadata)
if content:
# find images with empty alt
find_empty_alt(content, path)
        # optionally filter the content with typogrify, if requested
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(text, self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug('Signal %s.send(%s, <metadata>)',
context_signal.name, context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and settings['DEFAULT_DATE'] != 'fs':
metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = SafeDatetime.fromtimestamp(
os.stat(full_path).st_ctime)
metadata.update(settings.get('EXTRA_PATH_METADATA', {}).get(
source_path, {}))
return metadata
def parse_path_metadata(source_path, settings=None, process=None):
"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': '(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... '(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': SafeDatetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.insert(0, ('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
if k not in metadata:
k = k.lower() # metadata must be lowercase
if process:
v = process(k, v)
metadata[k] = v
return metadata
| agpl-3.0 |
BillKeenan/lets-encrypt-preview | letsencrypt/tests/proof_of_possession_test.py | 37 | 3484 | """Tests for letsencrypt.proof_of_possession."""
import os
import tempfile
import unittest
import mock
from acme import challenges
from acme import jose
from acme import messages
from letsencrypt import achallenges
from letsencrypt import proof_of_possession
from letsencrypt.display import util as display_util
from letsencrypt.tests import test_util
CERT0_PATH = test_util.vector_path("cert.der")
CERT2_PATH = test_util.vector_path("dsa_cert.pem")
CERT2_KEY_PATH = test_util.vector_path("dsa512_key.pem")
CERT3_PATH = test_util.vector_path("matching_cert.pem")
CERT3_KEY_PATH = test_util.vector_path("rsa512_key_2.pem")
CERT3_KEY = test_util.load_rsa_private_key("rsa512_key_2.pem").public_key()
class ProofOfPossessionTest(unittest.TestCase):
def setUp(self):
self.installer = mock.MagicMock()
self.cert1_path = tempfile.mkstemp()[1]
certs = [CERT0_PATH, self.cert1_path, CERT2_PATH, CERT3_PATH]
keys = [None, None, CERT2_KEY_PATH, CERT3_KEY_PATH]
self.installer.get_all_certs_keys.return_value = zip(
certs, keys, 4 * [None])
self.proof_of_pos = proof_of_possession.ProofOfPossession(
self.installer)
hints = challenges.ProofOfPossession.Hints(
jwk=jose.JWKRSA(key=CERT3_KEY), cert_fingerprints=(),
certs=(), serial_numbers=(), subject_key_identifiers=(),
issuers=(), authorized_for=())
chall = challenges.ProofOfPossession(
alg=jose.RS256, nonce='zczv4HMLVe_0kimJ25Juig', hints=hints)
challb = messages.ChallengeBody(
chall=chall, uri="http://example", status=messages.STATUS_PENDING)
self.achall = achallenges.ProofOfPossession(
challb=challb, domain="example.com")
def tearDown(self):
os.remove(self.cert1_path)
def test_perform_bad_challenge(self):
hints = challenges.ProofOfPossession.Hints(
jwk=jose.jwk.JWKOct(key="foo"), cert_fingerprints=(),
certs=(), serial_numbers=(), subject_key_identifiers=(),
issuers=(), authorized_for=())
chall = challenges.ProofOfPossession(
alg=jose.HS512, nonce='zczv4HMLVe_0kimJ25Juig', hints=hints)
challb = messages.ChallengeBody(
chall=chall, uri="http://example", status=messages.STATUS_PENDING)
self.achall = achallenges.ProofOfPossession(
challb=challb, domain="example.com")
self.assertEqual(self.proof_of_pos.perform(self.achall), None)
def test_perform_no_input(self):
self.assertTrue(self.proof_of_pos.perform(self.achall).verify())
@mock.patch("letsencrypt.proof_of_possession.zope.component.getUtility")
def test_perform_with_input(self, mock_input):
# Remove the matching certificate
self.installer.get_all_certs_keys.return_value.pop()
mock_input().input.side_effect = [(display_util.CANCEL, ""),
(display_util.OK, CERT0_PATH),
(display_util.OK, "imaginary_file"),
(display_util.OK, CERT3_KEY_PATH)]
self.assertFalse(self.proof_of_pos.perform(self.achall))
self.assertFalse(self.proof_of_pos.perform(self.achall))
self.assertFalse(self.proof_of_pos.perform(self.achall))
self.assertTrue(self.proof_of_pos.perform(self.achall).verify())
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 |
marcore/edx-platform | common/djangoapps/terrain/stubs/lti.py | 44 | 12380 | """
Stub implementation of LTI Provider.
What is supported:
------------------
1.) This LTI Provider can service only one Tool Consumer at a time. It is
not possible to include this LTI multiple times on a single page in the LMS.
"""
from uuid import uuid4
import textwrap
import urllib
from oauthlib.oauth1.rfc5849 import signature, parameters
import oauthlib.oauth1
import hashlib
import base64
import mock
import requests
from http import StubHttpRequestHandler, StubHttpService
class StubLtiHandler(StubHttpRequestHandler):
"""
A handler for LTI POST and GET requests.
"""
DEFAULT_CLIENT_KEY = 'test_client_key'
DEFAULT_CLIENT_SECRET = 'test_client_secret'
DEFAULT_LTI_ENDPOINT = 'correct_lti_endpoint'
DEFAULT_LTI_ADDRESS = 'http://127.0.0.1:{port}/'
def do_GET(self):
"""
        Handle a GET request from the client and send a response back.
        Used to check that the LTI Provider started correctly.
"""
self.send_response(200, 'This is LTI Provider.', {'Content-type': 'text/plain'})
def do_POST(self):
"""
        Handle a POST request from the client and send a response back.
"""
if 'grade' in self.path and self._send_graded_result().status_code == 200:
status_message = 'LTI consumer (edX) responded with XML content:<br>' + self.server.grade_data['TC answer']
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_outcome' in self.path and self._send_lti2_outcome().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
elif 'lti2_delete' in self.path and self._send_lti2_delete().status_code == 200:
status_message = 'LTI consumer (edX) responded with HTTP {}<br>'.format(
self.server.grade_data['status_code'])
content = self._create_content(status_message)
self.send_response(200, content)
# Respond to request with correct lti endpoint
elif self._is_correct_lti_request():
params = {k: v for k, v in self.post_dict.items() if k != 'oauth_signature'}
if self._check_oauth_signature(params, self.post_dict.get('oauth_signature', "")):
status_message = "This is LTI tool. Success."
                # Set grade data that needs to be stored as server data
if 'lis_outcome_service_url' in self.post_dict:
self.server.grade_data = {
'callback_url': self.post_dict.get('lis_outcome_service_url').replace('https', 'http'),
'sourcedId': self.post_dict.get('lis_result_sourcedid')
}
submit_url = '//{}:{}'.format(*self.server.server_address)
content = self._create_content(status_message, submit_url)
self.send_response(200, content)
else:
content = self._create_content("Wrong LTI signature")
self.send_response(200, content)
else:
content = self._create_content("Invalid request URL")
self.send_response(500, content)
def _send_graded_result(self):
"""
Send grade request.
"""
values = {
'textString': 0.5,
'sourcedId': self.server.grade_data['sourcedId'],
'imsx_messageIdentifier': uuid4().hex,
}
payload = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier> /
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<replaceResultRequest>
<resultRecord>
<sourcedGUID>
<sourcedId>{sourcedId}</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>{textString}</textString>
</resultScore>
</result>
</resultRecord>
</replaceResultRequest>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
""")
data = payload.format(**values)
url = self.server.grade_data['callback_url']
headers = {
'Content-Type': 'application/xml',
'X-Requested-With': 'XMLHttpRequest',
'Authorization': self._oauth_sign(url, data)
}
        # Send the request, ignoring verification of the SSL certificate
response = requests.post(url, data=data, headers=headers, verify=False)
self.server.grade_data['TC answer'] = response.content
return response
def _send_lti2_outcome(self):
"""
Send a grade back to consumer
"""
payload = textwrap.dedent("""
{{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result",
"resultScore" : {score},
"comment" : "This is awesome."
}}
""")
data = payload.format(score=0.8)
return self._send_lti2(data)
def _send_lti2_delete(self):
"""
Send a delete back to consumer
"""
payload = textwrap.dedent("""
{
"@context" : "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type" : "Result"
}
""")
return self._send_lti2(payload)
def _send_lti2(self, payload):
"""
Send lti2 json result service request.
"""
### We compute the LTI V2.0 service endpoint from the callback_url (which is set by the launch call)
url = self.server.grade_data['callback_url']
url_parts = url.split('/')
url_parts[-1] = "lti_2_0_result_rest_handler"
anon_id = self.server.grade_data['sourcedId'].split(":")[-1]
url_parts.extend(["user", anon_id])
new_url = '/'.join(url_parts)
content_type = 'application/vnd.ims.lis.v2.result+json'
headers = {
'Content-Type': content_type,
'Authorization': self._oauth_sign(new_url, payload,
method='PUT',
content_type=content_type)
}
        # Send the request, ignoring verification of the SSL certificate
response = requests.put(new_url, data=payload, headers=headers, verify=False)
self.server.grade_data['status_code'] = response.status_code
self.server.grade_data['TC answer'] = response.content
return response
def _create_content(self, response_text, submit_url=None):
"""
        Return content (str) for the launch page, grade submission, or result retrieval from the TC.
"""
if submit_url:
submit_form = textwrap.dedent("""
<form action="{submit_url}/grade" method="post">
<input type="submit" name="submit-button" value="Submit">
</form>
<form action="{submit_url}/lti2_outcome" method="post">
<input type="submit" name="submit-lti2-button" value="Submit">
</form>
<form action="{submit_url}/lti2_delete" method="post">
<input type="submit" name="submit-lti2-delete-button" value="Submit">
</form>
""").format(submit_url=submit_url)
else:
submit_form = ''
# Show roles only for LTI launch.
if self.post_dict.get('roles'):
role = '<h5>Role: {}</h5>'.format(self.post_dict['roles'])
else:
role = ''
response_str = textwrap.dedent("""
<html>
<head>
<title>TEST TITLE</title>
</head>
<body>
<div>
<h2>IFrame loaded</h2>
<h3>Server response is:</h3>
<h3 class="result">{response}</h3>
{role}
</div>
{submit_form}
</body>
</html>
""").format(response=response_text, role=role, submit_form=submit_form)
        # The LTI module currently double-quotes the lis_result_sourcedid
        # parameter, so unquote the response twice.
return urllib.unquote(urllib.unquote(response_str))
def _is_correct_lti_request(self):
"""
Return a boolean indicating whether the URL path is a valid LTI end-point.
"""
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
return lti_endpoint in self.path
def _oauth_sign(self, url, body, content_type=u'application/x-www-form-urlencoded', method=u'POST'):
"""
Signs request and returns signed Authorization header.
"""
client_key = self.server.config.get('client_key', self.DEFAULT_CLIENT_KEY)
client_secret = self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET)
client = oauthlib.oauth1.Client(
client_key=unicode(client_key),
client_secret=unicode(client_secret)
)
headers = {
# This is needed for body encoding:
'Content-Type': content_type,
}
# Calculate and encode body hash. See http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
sha1 = hashlib.sha1()
sha1.update(body)
oauth_body_hash = unicode(base64.b64encode(sha1.digest()))
params = client.get_oauth_params(None)
params.append((u'oauth_body_hash', oauth_body_hash))
mock_request = mock.Mock(
uri=unicode(urllib.unquote(url)),
headers=headers,
body=u"",
decoded_body=u"",
oauth_params=params,
http_method=unicode(method),
)
sig = client.get_oauth_signature(mock_request)
mock_request.oauth_params.append((u'oauth_signature', sig))
new_headers = parameters.prepare_headers(mock_request.oauth_params, headers, realm=None)
return new_headers['Authorization']
def _check_oauth_signature(self, params, client_signature):
"""
Checks oauth signature from client.
`params` are params from post request except signature,
`client_signature` is signature from request.
Builds mocked request and verifies hmac-sha1 signing::
1. builds string to sign from `params`, `url` and `http_method`.
2. signs it with `client_secret` which comes from server settings.
3. obtains signature after sign and then compares it with request.signature
        (request signature comes from the client in the request)
Returns `True` if signatures are correct, otherwise `False`.
"""
client_secret = unicode(self.server.config.get('client_secret', self.DEFAULT_CLIENT_SECRET))
port = self.server.server_address[1]
lti_base = self.DEFAULT_LTI_ADDRESS.format(port=port)
lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)
url = lti_base + lti_endpoint
request = mock.Mock()
request.params = [(unicode(k), unicode(v)) for k, v in params.items()]
request.uri = unicode(url)
request.http_method = u'POST'
request.signature = unicode(client_signature)
return signature.verify_hmac_sha1(request, client_secret)
class StubLtiService(StubHttpService):
"""
A stub LTI provider server that responds
to POST and GET requests to localhost.
"""
HANDLER_CLASS = StubLtiHandler
| agpl-3.0 |
python-tap/tappy | tap/tests/test_result.py | 2 | 3581 | # Copyright (c) 2019, Matt Layman and contributors
import contextlib
import os
import unittest
import unittest.case
from tap.i18n import _
from tap.runner import TAPTestResult
from tap.tests import TestCase
from tap.tracker import Tracker
class FakeTestCase(unittest.TestCase):
def runTest(self):
pass
@contextlib.contextmanager
def subTest(self, *args, **kwargs):
try:
self._subtest = unittest.case._SubTest(self, object(), {})
yield
finally:
self._subtest = None
def __call__(self, result):
pass
class TestTAPTestResult(TestCase):
@classmethod
def _make_one(cls):
# Yep, the stream is not being closed.
stream = open(os.devnull, "w")
result = TAPTestResult(stream, False, 0)
result.tracker = Tracker()
return result
def test_adds_error(self):
result = self._make_one()
# Python 3 does some extra testing in unittest on exceptions so fake
# the cause as if it were raised.
ex = Exception()
ex.__cause__ = None
result.addError(FakeTestCase(), (None, ex, None))
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_failure(self):
result = self._make_one()
# Python 3 does some extra testing in unittest on exceptions so fake
# the cause as if it were raised.
ex = Exception()
ex.__cause__ = None
result.addFailure(FakeTestCase(), (None, ex, None))
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_success(self):
result = self._make_one()
result.addSuccess(FakeTestCase())
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_skip(self):
result = self._make_one()
result.addSkip(FakeTestCase(), "a reason")
self.assertEqual(len(result.tracker._test_cases["FakeTestCase"]), 1)
def test_adds_expected_failure(self):
exc = self.factory.make_exc()
result = self._make_one()
result.addExpectedFailure(FakeTestCase(), exc)
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertFalse(line.ok)
self.assertEqual(line.directive.text, "TODO {}".format(_("(expected failure)")))
def test_adds_unexpected_success(self):
result = self._make_one()
result.addUnexpectedSuccess(FakeTestCase())
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertTrue(line.ok)
self.assertEqual(
line.directive.text, "TODO {}".format(_("(unexpected success)"))
)
def test_adds_subtest_success(self):
"""Test that the runner handles subtest success results."""
result = self._make_one()
test = FakeTestCase()
with test.subTest():
result.addSubTest(test, test._subtest, None)
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertTrue(line.ok)
def test_adds_subtest_failure(self):
"""Test that the runner handles subtest failure results."""
result = self._make_one()
# Python 3 does some extra testing in unittest on exceptions so fake
# the cause as if it were raised.
ex = Exception()
ex.__cause__ = None
test = FakeTestCase()
with test.subTest():
result.addSubTest(test, test._subtest, (ex.__class__, ex, None))
line = result.tracker._test_cases["FakeTestCase"][0]
self.assertFalse(line.ok)
| bsd-2-clause |
pedrobaeza/OpenUpgrade | openerp/cli/server.py | 187 | 5869 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
OpenERP - Server
OpenERP is an ERP+CRM program for small and medium businesses.
The whole source code is distributed under the terms of the
GNU Public Licence.
(c) 2003-TODAY, Fabien Pinckaers - OpenERP SA
"""
import atexit
import csv
import logging
import os
import signal
import sys
import threading
import traceback
import time
import openerp
from . import Command
__author__ = openerp.release.author
__version__ = openerp.release.version
# Also use the `openerp` logger for the main script.
_logger = logging.getLogger('openerp')
def check_root_user():
""" Exit if the process's user is 'root' (on POSIX system)."""
if os.name == 'posix':
import pwd
if pwd.getpwuid(os.getuid())[0] == 'root' :
sys.stderr.write("Running as user 'root' is a security risk, aborting.\n")
sys.exit(1)
def check_postgres_user():
""" Exit if the configured database user is 'postgres'.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if config['db_user'] == 'postgres':
sys.stderr.write("Using the database user 'postgres' is a security risk, aborting.")
sys.exit(1)
def report_configuration():
""" Log the server version and some configuration values.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
_logger.info("OpenERP version %s", __version__)
for name, value in [('addons paths', openerp.modules.module.ad_paths),
('database hostname', config['db_host'] or 'localhost'),
('database port', config['db_port'] or '5432'),
('database user', config['db_user'])]:
_logger.info("%s: %s", name, value)
def rm_pid_file():
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
try:
os.unlink(config['pidfile'])
except OSError:
pass
def setup_pid_file():
""" Create a file with the process id written in it.
This function assumes the configuration has been initialized.
"""
config = openerp.tools.config
if not openerp.evented and config['pidfile']:
with open(config['pidfile'], 'w') as fd:
pidtext = "%d" % (os.getpid())
fd.write(pidtext)
atexit.register(rm_pid_file)
def export_translation():
config = openerp.tools.config
dbname = config['db_name']
if config["language"]:
msg = "language %s" % (config["language"],)
else:
msg = "new language"
_logger.info('writing translation file for %s to %s', msg,
config["translate_out"])
fileformat = os.path.splitext(config["translate_out"])[-1][1:].lower()
with open(config["translate_out"], "w") as buf:
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_export(config["language"],
config["translate_modules"] or ["all"], buf, fileformat, cr)
_logger.info('translation file written successfully')
def import_translation():
config = openerp.tools.config
context = {'overwrite': config["overwrite_existing_translations"]}
dbname = config['db_name']
registry = openerp.modules.registry.RegistryManager.new(dbname)
with openerp.api.Environment.manage():
with registry.cursor() as cr:
openerp.tools.trans_load(
cr, config["translate_in"], config["language"], context=context,
)
def main(args):
check_root_user()
openerp.tools.config.parse_config(args)
check_postgres_user()
report_configuration()
config = openerp.tools.config
# the default limit for CSV fields in the module is 128KiB, which is not
# quite sufficient to import images to store in attachment. 500MiB is a
# bit overkill, but better safe than sorry I guess
csv.field_size_limit(500 * 1024 * 1024)
if config["test_file"]:
config["test_enable"] = True
if config["translate_out"]:
export_translation()
sys.exit(0)
if config["translate_in"]:
import_translation()
sys.exit(0)
# This needs to be done now to ensure the use of the multiprocessing
    # signaling mechanism for registries loaded with -d
if config['workers']:
openerp.multi_process = True
preload = []
if config['db_name']:
preload = config['db_name'].split(',')
stop = config["stop_after_init"]
setup_pid_file()
rc = openerp.service.server.start(preload=preload, stop=stop)
sys.exit(rc)
class Server(Command):
"""Start the odoo server (default command)"""
def run(self, args):
main(args)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
h3biomed/luigi | luigi/execution_summary.py | 7 | 17170 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provide the function :py:func:`summary` that is used for printing
an `execution summary
<https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py>`_
at the end of luigi invocations.
"""
import textwrap
import collections
import functools
import luigi
class execution_summary(luigi.Config):
summary_length = luigi.IntParameter(default=5)
def _partition_tasks(worker):
"""
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
"""
task_history = worker._add_task_history
pending_tasks = {task for(task, status, ext) in task_history if status == 'PENDING'}
set_tasks = {}
set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks}
set_tasks["already_done"] = {task for (task, status, ext) in task_history
if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]}
set_tasks["ever_failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'}
set_tasks["failed"] = set_tasks["ever_failed"] - set_tasks["completed"]
set_tasks["scheduling_error"] = {task for(task, status, ext) in task_history if status == 'UNKNOWN'}
set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and not ext}
set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history
if status == 'PENDING' and task not in set_tasks["ever_failed"] and task not in set_tasks["completed"] and ext}
set_tasks["run_by_other_worker"] = set()
set_tasks["upstream_failure"] = set()
set_tasks["upstream_missing_dependency"] = set()
set_tasks["upstream_run_by_other_worker"] = set()
set_tasks["upstream_scheduling_error"] = set()
set_tasks["not_run"] = set()
return set_tasks
def _root_task(worker):
"""
Return the first task scheduled by the worker, corresponding to the root task
"""
return worker._add_task_history[0][0]
def _populate_unknown_statuses(set_tasks):
"""
Add the "upstream_*" and "not_run" statuses my mutating set_tasks.
"""
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visited)
def _depth_first_search(set_tasks, current_task, visited):
"""
    This DFS checks why tasks are still pending.
"""
visited.add(current_task)
if current_task in set_tasks["still_pending_not_ext"]:
upstream_failure = False
upstream_missing_dependency = False
upstream_run_by_other_worker = False
upstream_scheduling_error = False
for task in current_task._requires():
if task not in visited:
_depth_first_search(set_tasks, task, visited)
if task in set_tasks["ever_failed"] or task in set_tasks["upstream_failure"]:
set_tasks["upstream_failure"].add(current_task)
upstream_failure = True
if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]:
set_tasks["upstream_missing_dependency"].add(current_task)
upstream_missing_dependency = True
if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]:
set_tasks["upstream_run_by_other_worker"].add(current_task)
upstream_run_by_other_worker = True
if task in set_tasks["scheduling_error"]:
set_tasks["upstream_scheduling_error"].add(current_task)
upstream_scheduling_error = True
if not upstream_failure and not upstream_missing_dependency and \
not upstream_run_by_other_worker and not upstream_scheduling_error and \
current_task not in set_tasks["run_by_other_worker"]:
set_tasks["not_run"].add(current_task)
def _get_str(task_dict, extra_indent):
"""
This returns a string for each status
"""
summary_length = execution_summary().summary_length
lines = []
task_names = sorted(task_dict.keys())
for task_family in task_names:
tasks = task_dict[task_family]
tasks = sorted(tasks, key=lambda x: str(x))
prefix_size = 8 if extra_indent else 4
prefix = ' ' * prefix_size
line = None
if summary_length > 0 and len(lines) >= summary_length:
line = prefix + "..."
lines.append(line)
break
if len(tasks[0].get_params()) == 0:
line = prefix + '- {0} {1}()'.format(len(tasks), str(task_family))
elif _get_len_of_params(tasks[0]) > 60 or len(str(tasks[0])) > 200 or \
(len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)):
"""
This is to make sure that there is no really long task in the output
"""
line = prefix + '- {0} {1}(...)'.format(len(tasks), task_family)
elif len((tasks[0].get_params())) == 1:
attributes = {getattr(task, tasks[0].get_params()[0][0]) for task in tasks}
param_class = tasks[0].get_params()[0][1]
first, last = _ranging_attributes(attributes, param_class)
if first is not None and last is not None and len(attributes) > 3:
param_str = '{0}...{1}'.format(param_class.serialize(first), param_class.serialize(last))
else:
param_str = '{0}'.format(_get_str_one_parameter(tasks))
line = prefix + '- {0} {1}({2}={3})'.format(len(tasks), task_family, tasks[0].get_params()[0][0], param_str)
else:
ranging = False
params = _get_set_of_params(tasks)
unique_param_keys = list(_get_unique_param_keys(params))
if len(unique_param_keys) == 1:
unique_param, = unique_param_keys
attributes = params[unique_param]
param_class = unique_param[1]
first, last = _ranging_attributes(attributes, param_class)
if first is not None and last is not None and len(attributes) > 2:
ranging = True
line = prefix + '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(first, last, tasks, unique_param))
if not ranging:
if len(tasks) == 1:
line = prefix + '- {0} {1}'.format(len(tasks), tasks[0])
if len(tasks) == 2:
line = prefix + '- {0} {1} and {2}'.format(len(tasks), tasks[0], tasks[1])
if len(tasks) > 2:
line = prefix + '- {0} {1} ...'.format(len(tasks), tasks[0])
lines.append(line)
return '\n'.join(lines)
def _get_len_of_params(task):
return sum(len(param[0]) for param in task.get_params())
def _get_str_ranging_multiple_parameters(first, last, tasks, unique_param):
row = ''
str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(first), unique_param[1].serialize(last))
for param in tasks[0].get_params():
row += '{0}='.format(param[0])
if param[0] == unique_param[0]:
row += '{0}'.format(str_unique_param)
else:
row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0])))
if param != tasks[0].get_params()[-1]:
row += ", "
row += ')'
return row
def _get_set_of_params(tasks):
params = {}
for param in tasks[0].get_params():
params[param] = {getattr(task, param[0]) for task in tasks}
return params
def _get_unique_param_keys(params):
for param_key, param_values in params.items():
if len(param_values) > 1:
yield param_key
def _ranging_attributes(attributes, param_class):
"""
Checks if there is a continuous range
"""
next_attributes = {param_class.next_in_enumeration(attribute) for attribute in attributes}
in_first = attributes.difference(next_attributes)
in_second = next_attributes.difference(attributes)
if len(in_first) == 1 and len(in_second) == 1:
for x in attributes:
if {param_class.next_in_enumeration(x)} == in_second:
return next(iter(in_first)), x
return None, None
def _get_str_one_parameter(tasks):
row = ''
count = 0
for task in tasks:
if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200:
row += '...'
break
param = task.get_params()[0]
row += '{0}'.format(param[1].serialize(getattr(task, param[0])))
if count < len(tasks) - 1:
row += ','
count += 1
return row
def _serialize_first_param(task):
return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0]))
def _get_number_of_tasks_for(status, group_tasks):
if status == "still_pending":
return (_get_number_of_tasks(group_tasks["still_pending_ext"]) +
_get_number_of_tasks(group_tasks["still_pending_not_ext"]))
return _get_number_of_tasks(group_tasks[status])
def _get_number_of_tasks(task_dict):
return sum(len(tasks) for tasks in task_dict.values())
def _get_comments(group_tasks):
"""
Get the human readable comments and quantities for the task types.
"""
comments = {}
for status, human in _COMMENTS:
num_tasks = _get_number_of_tasks_for(status, group_tasks)
if num_tasks:
space = " " if status in _PENDING_SUB_STATUSES else ""
comments[status] = '{space}* {num_tasks} {human}:\n'.format(
space=space,
num_tasks=num_tasks,
human=human)
return comments
# Ordered in the sense that they'll be printed in this order
_ORDERED_STATUSES = (
"already_done",
"completed",
"ever_failed",
"failed",
"scheduling_error",
"still_pending",
"still_pending_ext",
"run_by_other_worker",
"upstream_failure",
"upstream_missing_dependency",
"upstream_run_by_other_worker",
"upstream_scheduling_error",
"not_run",
)
_PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):])
_COMMENTS = set((
("already_done", 'present dependencies were encountered'),
("completed", 'ran successfully'),
("failed", 'failed'),
("scheduling_error", 'failed scheduling'),
("still_pending", 'were left pending, among these'),
("still_pending_ext", 'were missing external dependencies'),
("run_by_other_worker", 'were being run by another worker'),
("upstream_failure", 'had failed dependencies'),
("upstream_missing_dependency", 'had missing external dependencies'),
("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
("upstream_scheduling_error", 'had dependencies whose scheduling failed'),
("not_run", 'was not granted run permission by the scheduler'),
))
def _get_run_by_other_worker(worker):
"""
    This returns a set of the tasks that are being run by other workers
"""
task_sets = _get_external_workers(worker).values()
return functools.reduce(lambda a, b: a | b, task_sets, set())
def _get_external_workers(worker):
"""
    This returns a dict mapping each other worker to the set of tasks it was running
"""
worker_that_blocked_task = collections.defaultdict(set)
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_worker_id == worker._id or not other_task:
continue
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task
def _group_tasks_by_name_and_status(task_dict):
"""
    Takes a collection of tasks (one status bucket) and returns a dictionary
    mapping each task family name to the list of tasks belonging to that
    family
"""
group_status = {}
for task in task_dict:
if task.task_family not in group_status:
group_status[task.task_family] = []
group_status[task.task_family].append(task)
return group_status
def _summary_dict(worker):
set_tasks = _partition_tasks(worker)
set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker)
_populate_unknown_statuses(set_tasks)
return set_tasks
def _summary_format(set_tasks, worker):
group_tasks = {}
for status, task_dict in set_tasks.items():
group_tasks[status] = _group_tasks_by_name_and_status(task_dict)
comments = _get_comments(group_tasks)
num_all_tasks = sum([len(set_tasks["already_done"]),
len(set_tasks["completed"]), len(set_tasks["failed"]),
len(set_tasks["scheduling_error"]),
len(set_tasks["still_pending_ext"]),
len(set_tasks["still_pending_not_ext"])])
str_output = ''
str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks)
for status in _ORDERED_STATUSES:
if status not in comments:
continue
str_output += '{0}'.format(comments[status])
if status != 'still_pending':
str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES))
ext_workers = _get_external_workers(worker)
group_tasks_ext_workers = {}
for ext_worker, task_dict in ext_workers.items():
group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict)
if len(ext_workers) > 0:
str_output += "\nThe other workers were:\n"
count = 0
for ext_worker, task_dict in ext_workers.items():
if count > 3 and count < len(ext_workers) - 1:
str_output += " and {0} other workers".format(len(ext_workers) - count)
break
str_output += " - {0} ran {1} tasks\n".format(ext_worker, len(task_dict))
count += 1
str_output += '\n'
if num_all_tasks == sum([len(set_tasks["already_done"]),
len(set_tasks["scheduling_error"]),
len(set_tasks["still_pending_ext"]),
len(set_tasks["still_pending_not_ext"])]):
if len(ext_workers) == 0:
str_output += '\n'
str_output += 'Did not run any tasks'
smiley = ""
reason = ""
if set_tasks["ever_failed"]:
if not set_tasks["failed"]:
smiley = ":)"
reason = "there were failed tasks but they all suceeded in a retry"
else:
smiley = ":("
reason = "there were failed tasks"
if set_tasks["scheduling_error"]:
reason += " and tasks whose scheduling failed"
elif set_tasks["scheduling_error"]:
smiley = ":("
reason = "there were tasks whose scheduling failed"
elif set_tasks["not_run"]:
smiley = ":|"
reason = "there were tasks that were not granted run permission by the scheduler"
elif set_tasks["still_pending_ext"]:
smiley = ":|"
reason = "there were missing external dependencies"
else:
smiley = ":)"
reason = "there were no failed tasks or missing external dependencies"
str_output += "\nThis progress looks {0} because {1}".format(smiley, reason)
if num_all_tasks == 0:
str_output = 'Did not schedule any tasks'
return str_output
def _summary_wrap(str_output):
return textwrap.dedent("""
===== Luigi Execution Summary =====
{str_output}
===== Luigi Execution Summary =====
""").format(str_output=str_output)
def summary(worker):
"""
Given a worker, return a human readable summary of what the worker have
done.
"""
return _summary_wrap(_summary_format(_summary_dict(worker), worker))
# 5
| apache-2.0 |
mKeRix/home-assistant | homeassistant/components/freebox/config_flow.py | 6 | 3504 | """Config flow to configure the Freebox integration."""
import logging
from aiofreepybox.exceptions import AuthorizationError, HttpRequestError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import DOMAIN # pylint: disable=unused-import
from .router import get_api
_LOGGER = logging.getLogger(__name__)
class FreeboxFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize Freebox config flow."""
self._host = None
self._port = None
def _show_setup_form(self, user_input=None, errors=None):
"""Show the setup form to the user."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=user_input.get(CONF_HOST, "")): str,
vol.Required(CONF_PORT, default=user_input.get(CONF_PORT, "")): int,
}
),
errors=errors or {},
)
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is None:
return self._show_setup_form(user_input, errors)
self._host = user_input[CONF_HOST]
self._port = user_input[CONF_PORT]
# Check if already configured
await self.async_set_unique_id(self._host)
self._abort_if_unique_id_configured()
return await self.async_step_link()
async def async_step_link(self, user_input=None):
"""Attempt to link with the Freebox router.
Given a configured host, will ask the user to press the button
to connect to the router.
"""
if user_input is None:
return self.async_show_form(step_id="link")
errors = {}
fbx = await get_api(self.hass, self._host)
try:
# Open connection and check authentication
await fbx.open(self._host, self._port)
# Check permissions
await fbx.system.get_config()
await fbx.lan.get_hosts_list()
await self.hass.async_block_till_done()
# Close connection
await fbx.close()
return self.async_create_entry(
title=self._host, data={CONF_HOST: self._host, CONF_PORT: self._port},
)
except AuthorizationError as error:
_LOGGER.error(error)
errors["base"] = "register_failed"
except HttpRequestError:
_LOGGER.error("Error connecting to the Freebox router at %s", self._host)
errors["base"] = "connection_failed"
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Unknown error connecting with Freebox router at %s", self._host
)
errors["base"] = "unknown"
return self.async_show_form(step_id="link", errors=errors)
async def async_step_import(self, user_input=None):
"""Import a config entry."""
return await self.async_step_user(user_input)
async def async_step_discovery(self, discovery_info):
"""Initialize step from discovery."""
return await self.async_step_user(discovery_info)
| mit |
FireballDWF/cloud-custodian | tools/c7n_gcp/tests/test_notify_gcp.py | 5 | 2961 | # Copyright 2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gcp_common import BaseTest
from c7n_gcp.client import Session
import mock
import sys
class NotifyTest(BaseTest):
def test_pubsub_notify(self):
factory = self.replay_flight_data("notify-action")
orig_client = Session.client
stub_client = mock.MagicMock()
calls = []
def client_factory(*args, **kw):
calls.append(args)
if len(calls) == 1:
return orig_client(*args, **kw)
return stub_client
self.patch(Session, 'client', client_factory)
p = self.load_policy({
'name': 'test-notify',
'resource': 'gcp.pubsub-topic',
'filters': [
{
'name': 'projects/cloud-custodian/topics/gcptestnotifytopic'
}
],
'actions': [
{'type': 'notify',
'template': 'default',
'priority_header': '2',
'subject': 'testing notify action',
'to': ['[email protected]'],
'transport':
{'type': 'pubsub',
'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic'}
}
]}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
stub_client.execute_command.assert_called_once()
if sys.version_info.major < 3:
return
stub_client.execute_command.assert_called_with(
'publish', {
'topic': 'projects/cloud-custodian/topics/gcptestnotifytopic',
'body': {
'messages': {
'data': ('eJzdUrtqAzEQ7PUVh+qcjd2EuEqVLl8QgpFXe2cFnVZIq8Bh/O/'
'RA58vkCqkSrHNDDuPZS9C4ic6lofOJWsfhFQAlBwfjc6YhBSZtFGu3'
'+2fdvLO/0wGHA25wilrC+DJGpgzcBHSqQkLxRi5d8RmmNtOpBSgUiP4jU'
'+nmE49kzdQ+MFYxhAz/SZWKj7QBwLHLVhKul+'
'ybOti3GapYtR8mpi4ivfagHPIRZBnXwXviRgnbxVXVOOgkuXaJRgKhuf'
'jGZXGUNh9wXPakuRWzbixa1pdc6qSVO1kihieNU3KuA3QJGsgDspFT4Hb'
'nW6B2iHadon/69K5trguxb+b/OPWq9/6i+/JcvDoDq+'
'K4Yz6ZfWVTbUcucwX+HoY5Q==')
}}})
| apache-2.0 |
joeyli/qemu-acpitad | tests/qemu-iotests/qed.py | 248 | 7194 | #!/usr/bin/env python
#
# Tool to manipulate QED image files
#
# Copyright (C) 2010 IBM, Corp.
#
# Authors:
# Stefan Hajnoczi <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
import sys
import struct
import random
import optparse
# This can be used as a module
__all__ = ['QED_F_NEED_CHECK', 'QED']
QED_F_NEED_CHECK = 0x02
header_fmt = '<IIIIQQQQQII'
header_size = struct.calcsize(header_fmt)
field_names = ['magic', 'cluster_size', 'table_size',
'header_size', 'features', 'compat_features',
'autoclear_features', 'l1_table_offset', 'image_size',
'backing_filename_offset', 'backing_filename_size']
table_elem_fmt = '<Q'
table_elem_size = struct.calcsize(table_elem_fmt)
def err(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def unpack_header(s):
fields = struct.unpack(header_fmt, s)
return dict((field_names[idx], val) for idx, val in enumerate(fields))
def pack_header(header):
fields = tuple(header[x] for x in field_names)
return struct.pack(header_fmt, *fields)
def unpack_table_elem(s):
return struct.unpack(table_elem_fmt, s)[0]
def pack_table_elem(elem):
return struct.pack(table_elem_fmt, elem)
class QED(object):
def __init__(self, f):
self.f = f
self.f.seek(0, 2)
self.filesize = f.tell()
self.load_header()
self.load_l1_table()
def raw_pread(self, offset, size):
self.f.seek(offset)
return self.f.read(size)
def raw_pwrite(self, offset, data):
self.f.seek(offset)
return self.f.write(data)
def load_header(self):
self.header = unpack_header(self.raw_pread(0, header_size))
def store_header(self):
self.raw_pwrite(0, pack_header(self.header))
def read_table(self, offset):
size = self.header['table_size'] * self.header['cluster_size']
s = self.raw_pread(offset, size)
table = [unpack_table_elem(s[i:i + table_elem_size]) for i in xrange(0, size, table_elem_size)]
return table
def load_l1_table(self):
self.l1_table = self.read_table(self.header['l1_table_offset'])
self.table_nelems = self.header['table_size'] * self.header['cluster_size'] / table_elem_size
def write_table(self, offset, table):
s = ''.join(pack_table_elem(x) for x in table)
self.raw_pwrite(offset, s)
def random_table_item(table):
vals = [(index, offset) for index, offset in enumerate(table) if offset != 0]
if not vals:
err('cannot pick random item because table is empty')
return random.choice(vals)
def corrupt_table_duplicate(table):
'''Corrupt a table by introducing a duplicate offset'''
victim_idx, victim_val = random_table_item(table)
unique_vals = set(table)
if len(unique_vals) == 1:
err('no duplication corruption possible in table')
dup_val = random.choice(list(unique_vals.difference([victim_val])))
table[victim_idx] = dup_val
def corrupt_table_invalidate(qed, table):
'''Corrupt a table by introducing an invalid offset'''
index, _ = random_table_item(table)
table[index] = qed.filesize + random.randint(0, 100 * 1024 * 1024 * 1024 * 1024)
def cmd_show(qed, *args):
    '''show [header|l1|l2 <offset>] - Show header or l1/l2 tables'''
if not args or args[0] == 'header':
print qed.header
elif args[0] == 'l1':
print qed.l1_table
elif len(args) == 2 and args[0] == 'l2':
offset = int(args[1])
print qed.read_table(offset)
else:
err('unrecognized sub-command')
def cmd_duplicate(qed, table_level):
'''duplicate l1|l2 - Duplicate a random table element'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_duplicate(table)
qed.write_table(offset, table)
def cmd_invalidate(qed, table_level):
'''invalidate l1|l2 - Plant an invalid table element at random'''
if table_level == 'l1':
offset = qed.header['l1_table_offset']
table = qed.l1_table
elif table_level == 'l2':
_, offset = random_table_item(qed.l1_table)
table = qed.read_table(offset)
else:
err('unrecognized sub-command')
corrupt_table_invalidate(qed, table)
qed.write_table(offset, table)
def cmd_need_check(qed, *args):
'''need-check [on|off] - Test, set, or clear the QED_F_NEED_CHECK header bit'''
if not args:
print bool(qed.header['features'] & QED_F_NEED_CHECK)
return
if args[0] == 'on':
qed.header['features'] |= QED_F_NEED_CHECK
elif args[0] == 'off':
qed.header['features'] &= ~QED_F_NEED_CHECK
else:
err('unrecognized sub-command')
qed.store_header()
def cmd_zero_cluster(qed, pos, *args):
'''zero-cluster <pos> [<n>] - Zero data clusters'''
pos, n = int(pos), 1
if args:
if len(args) != 1:
err('expected one argument')
n = int(args[0])
for i in xrange(n):
l1_index = pos / qed.header['cluster_size'] / len(qed.l1_table)
if qed.l1_table[l1_index] == 0:
err('no l2 table allocated')
l2_offset = qed.l1_table[l1_index]
l2_table = qed.read_table(l2_offset)
l2_index = (pos / qed.header['cluster_size']) % len(qed.l1_table)
l2_table[l2_index] = 1 # zero the data cluster
qed.write_table(l2_offset, l2_table)
pos += qed.header['cluster_size']
def cmd_copy_metadata(qed, outfile):
'''copy-metadata <outfile> - Copy metadata only (for scrubbing corrupted images)'''
out = open(outfile, 'wb')
# Match file size
out.seek(qed.filesize - 1)
out.write('\0')
# Copy header clusters
out.seek(0)
header_size_bytes = qed.header['header_size'] * qed.header['cluster_size']
out.write(qed.raw_pread(0, header_size_bytes))
# Copy L1 table
out.seek(qed.header['l1_table_offset'])
s = ''.join(pack_table_elem(x) for x in qed.l1_table)
out.write(s)
# Copy L2 tables
for l2_offset in qed.l1_table:
if l2_offset == 0:
continue
l2_table = qed.read_table(l2_offset)
out.seek(l2_offset)
s = ''.join(pack_table_elem(x) for x in l2_table)
out.write(s)
out.close()
def usage():
print 'Usage: %s <file> <cmd> [<arg>, ...]' % sys.argv[0]
print
print 'Supported commands:'
for cmd in sorted(x for x in globals() if x.startswith('cmd_')):
print globals()[cmd].__doc__
sys.exit(1)
def main():
if len(sys.argv) < 3:
usage()
filename, cmd = sys.argv[1:3]
cmd = 'cmd_' + cmd.replace('-', '_')
if cmd not in globals():
usage()
qed = QED(open(filename, 'r+b'))
try:
globals()[cmd](qed, *sys.argv[3:])
except TypeError, e:
sys.stderr.write(globals()[cmd].__doc__ + '\n')
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 |
sugruedes/bitcoin | contrib/testgen/gen_base58_test_vectors.py | 1000 | 4343 | #!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
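# For example, the first template ((PUBKEY_ADDRESS,), 20, ()) describes a
# mainnet pay-to-pubkey-hash address: a single 0x00 version byte, a 20-byte
# payload and no suffix.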
def is_valid(v):
'''Check vector v for validity'''
result = b58decode_chk(v)
if result is None:
return False
valid = False
for template in templates:
prefix = str(bytearray(template[0]))
suffix = str(bytearray(template[2]))
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return False
def gen_valid_vectors():
'''Generate valid test vectors'''
while True:
for template in templates:
prefix = str(bytearray(template[0]))
payload = os.urandom(template[1])
suffix = str(bytearray(template[2]))
rv = b58encode_chk(prefix + payload + suffix)
assert is_valid(rv)
metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
'''Generate possibly invalid vector'''
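    # Each flag independently corrupts one part of the vector; with every flag
    # False the result is in fact a valid vector, which gen_invalid_vectors()
    # filters out with is_valid().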
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = str(bytearray(template[0]))
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = str(bytearray(template[2]))
return b58encode_chk(prefix + payload + suffix)
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
while True:
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
for template in templates:
val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys, json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| mit |
rmboggs/django | django/core/files/utils.py | 395 | 1338 | from django.utils import six
class FileProxyMixin(object):
"""
A mixin class used to forward file methods to an underlaying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.xreadlines)
if six.PY3:
seekable = property(lambda self: self.file.seekable)
def __iter__(self):
return iter(self.file)
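# Usage sketch (illustrative, not part of Django): any wrapper that stores the
# wrapped object as ``self.file`` picks up all of the proxied methods above.
#
#   from io import BytesIO
#
#   class ExampleProxy(FileProxyMixin):
#       def __init__(self, file):
#           self.file = file
#
#   proxy = ExampleProxy(BytesIO(b'hello'))
#   proxy.read()   # forwarded to BytesIO.read via the ``read`` property
#   proxy.seek(0)  # forwarded to BytesIO.seek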
| bsd-3-clause |
levkar/odoo | addons/hw_scale/controllers/main.py | 20 | 15524 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import os
import re
import time
from collections import namedtuple
from os import listdir
from threading import Thread, Lock
from odoo import http
import odoo.addons.hw_proxy.controllers.main as hw_proxy
_logger = logging.getLogger(__name__)
DRIVER_NAME = 'scale'
try:
import serial
except ImportError:
_logger.error('Odoo module hw_scale depends on the pyserial python module')
serial = None
def _toledo8217StatusParse(status):
""" Parse a scale's status, returning a `(weight, weight_info)` pair. """
weight, weight_info = None, None
stat = ord(status[status.index('?') + 1])
if stat == 0:
weight_info = 'ok'
else:
weight_info = []
if stat & 1 :
weight_info.append('moving')
if stat & 1 << 1:
weight_info.append('over_capacity')
if stat & 1 << 2:
weight_info.append('negative')
weight = 0.0
if stat & 1 << 3:
weight_info.append('outside_zero_capture_range')
if stat & 1 << 4:
weight_info.append('center_of_zero')
if stat & 1 << 5:
weight_info.append('net_weight')
return weight, weight_info
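# Worked example (illustrative, not part of the driver): an answer such as
# '\x02 ?\x06\r' carries the status byte right after '?'; ord('\x06') == 6 sets
# the over_capacity and negative bits, so the parser above returns
# (0.0, ['over_capacity', 'negative']) -- weight is forced to 0.0 by the
# negative bit.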
ScaleProtocol = namedtuple(
'ScaleProtocol',
"name baudrate bytesize stopbits parity timeout writeTimeout weightRegexp statusRegexp "
"statusParse commandTerminator commandDelay weightDelay newWeightDelay "
"weightCommand zeroCommand tareCommand clearCommand emptyAnswerValid autoResetWeight")
# 8217 Mettler-Toledo (Weight-only) Protocol, as described in the scale's Service Manual.
# e.g. here: https://www.manualslib.com/manual/861274/Mettler-Toledo-Viva.html?page=51#manual
# Our recommended scale, the Mettler-Toledo "Ariva-S", supports this protocol on
# both the USB and RS232 ports; it can be configured in the setup menu as protocol option 3.
# We use the default serial protocol settings, the scale's settings can be configured in the
# scale's menu anyway.
Toledo8217Protocol = ScaleProtocol(
name='Toledo 8217',
baudrate=9600,
bytesize=serial.SEVENBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_EVEN,
timeout=1,
writeTimeout=1,
weightRegexp="\x02\\s*([0-9.]+)N?\\r",
statusRegexp="\x02\\s*(\\?.)\\r",
statusParse=_toledo8217StatusParse,
commandDelay=0.2,
weightDelay=0.5,
newWeightDelay=0.2,
commandTerminator='',
weightCommand='W',
zeroCommand='Z',
tareCommand='T',
clearCommand='C',
emptyAnswerValid=False,
autoResetWeight=False,
)
# The ADAM scales have their own RS232 protocol, usually documented in the scale's manual
# e.g at https://www.adamequipment.com/media/docs/Print%20Publications/Manuals/PDF/AZEXTRA/AZEXTRA-UM.pdf
# https://www.manualslib.com/manual/879782/Adam-Equipment-Cbd-4.html?page=32#manual
# Only the baudrate and label format seem to be configurable in the AZExtra series.
ADAMEquipmentProtocol = ScaleProtocol(
name='Adam Equipment',
baudrate=4800,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE,
timeout=0.2,
writeTimeout=0.2,
weightRegexp=r"\s*([0-9.]+)kg", # LABEL format 3 + KG in the scale settings, but Label 1/2 should work
statusRegexp=None,
statusParse=None,
commandTerminator="\r\n",
commandDelay=0.2,
weightDelay=0.5,
newWeightDelay=5, # AZExtra beeps every time you ask for a weight that was previously returned!
# Adding an extra delay gives the operator a chance to remove the products
# before the scale starts beeping. Could not find a way to disable the beeps.
weightCommand='P',
zeroCommand='Z',
tareCommand='T',
clearCommand=None, # No clear command -> Tare again
emptyAnswerValid=True, # AZExtra does not answer unless a new non-zero weight has been detected
autoResetWeight=True, # AZExtra will not return 0 after removing products
)
SCALE_PROTOCOLS = (
Toledo8217Protocol,
ADAMEquipmentProtocol, # must be listed last, as it supports no probing!
)
class Scale(Thread):
def __init__(self):
Thread.__init__(self)
self.lock = Lock()
self.scalelock = Lock()
self.status = {'status':'connecting', 'messages':[]}
self.input_dir = '/dev/serial/by-path/'
self.weight = 0
self.weight_info = 'ok'
self.device = None
self.path_to_scale = ''
self.protocol = None
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def set_status(self, status, message=None):
if status == self.status['status']:
if message is not None and message != self.status['messages'][-1]:
self.status['messages'].append(message)
if status == 'error' and message:
_logger.error('Scale Error: '+ message)
elif status == 'disconnected' and message:
_logger.warning('Disconnected Scale: '+ message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('Scale Error: '+ message)
elif status == 'disconnected' and message:
_logger.info('Disconnected Scale: %s', message)
def _get_raw_response(self, connection):
answer = []
while True:
char = connection.read(1) # may return `bytes` or `str`
if not char:
break
else:
answer.append(char)
return ''.join(answer)
def _parse_weight_answer(self, protocol, answer):
""" Parse a scale's answer to a weighing request, returning
a `(weight, weight_info, status)` pair.
"""
weight, weight_info, status = None, None, None
try:
_logger.debug("Parsing weight [%r]", answer)
if not answer and protocol.emptyAnswerValid:
                # Some scales do not return the same value again, but we
                # should not clear the weight data, as the POS may still be reading it
return weight, weight_info, status
if protocol.statusRegexp and re.search(protocol.statusRegexp, answer):
# parse status to set weight_info - we'll try weighing again later
weight, weight_info = protocol.statusParse(answer)
else:
match = re.search(protocol.weightRegexp, answer)
if match:
weight_text = match.group(1)
try:
weight = float(weight_text)
_logger.info('Weight: %s', weight)
except ValueError:
_logger.exception("Cannot parse weight [%r]", weight_text)
status = 'Invalid weight, please power-cycle the scale'
else:
_logger.error("Cannot parse scale answer [%r]", answer)
status = 'Invalid scale answer, please power-cycle the scale'
except Exception as e:
_logger.exception("Cannot parse scale answer [%r]", answer)
status = ("Could not weigh on scale %s with protocol %s: %s" %
(self.path_to_scale, protocol.name, e))
return weight, weight_info, status
def get_device(self):
if self.device:
return self.device
with hw_proxy.rs232_lock:
try:
if not os.path.exists(self.input_dir):
self.set_status('disconnected', 'No RS-232 device found')
return None
devices = [device for device in listdir(self.input_dir)]
for device in devices:
driver = hw_proxy.rs232_devices.get(device)
if driver and driver != DRIVER_NAME:
# belongs to another driver
_logger.info('Ignoring %s, belongs to %s', device, driver)
continue
path = self.input_dir + device
for protocol in SCALE_PROTOCOLS:
_logger.info('Probing %s with protocol %s', path, protocol)
connection = serial.Serial(path,
baudrate=protocol.baudrate,
bytesize=protocol.bytesize,
stopbits=protocol.stopbits,
parity=protocol.parity,
timeout=1, # longer timeouts for probing
writeTimeout=1) # longer timeouts for probing
connection.write(protocol.weightCommand + protocol.commandTerminator)
time.sleep(protocol.commandDelay)
answer = self._get_raw_response(connection)
weight, weight_info, status = self._parse_weight_answer(protocol, answer)
if status:
_logger.info('Probing %s: no valid answer to protocol %s', path, protocol.name)
else:
_logger.info('Probing %s: answer looks ok for protocol %s', path, protocol.name)
self.path_to_scale = path
self.protocol = protocol
self.set_status(
'connected',
'Connected to %s with %s protocol' % (device, protocol.name)
)
connection.timeout = protocol.timeout
connection.writeTimeout = protocol.writeTimeout
hw_proxy.rs232_devices[path] = DRIVER_NAME
return connection
self.set_status('disconnected', 'No supported RS-232 scale found')
except Exception as e:
_logger.exception('Failed probing for scales')
self.set_status('error', 'Failed probing for scales: %s' % e)
return None
def get_weight(self):
self.lockedstart()
return self.weight
def get_weight_info(self):
self.lockedstart()
return self.weight_info
def get_status(self):
self.lockedstart()
return self.status
def read_weight(self):
with self.scalelock:
p = self.protocol
try:
self.device.write(p.weightCommand + p.commandTerminator)
time.sleep(p.commandDelay)
answer = self._get_raw_response(self.device)
weight, weight_info, status = self._parse_weight_answer(p, answer)
if status:
self.set_status('error', status)
self.device = None
else:
if weight is not None:
self.weight = weight
if weight_info is not None:
self.weight_info = weight_info
except Exception as e:
self.set_status(
'error',
"Could not weigh on scale %s with protocol %s: %s" %
(self.path_to_scale, p.name, e))
self.device = None
def set_zero(self):
with self.scalelock:
if self.device:
try:
self.device.write(self.protocol.zeroCommand + self.protocol.commandTerminator)
time.sleep(self.protocol.commandDelay)
except Exception as e:
self.set_status(
'error',
"Could not zero scale %s with protocol %s: %s" %
(self.path_to_scale, self.protocol.name, e))
self.device = None
def set_tare(self):
with self.scalelock:
if self.device:
try:
self.device.write(self.protocol.tareCommand + self.protocol.commandTerminator)
time.sleep(self.protocol.commandDelay)
except Exception as e:
self.set_status(
'error',
"Could not tare scale %s with protocol %s: %s" %
(self.path_to_scale, self.protocol.name, e))
self.device = None
def clear_tare(self):
with self.scalelock:
if self.device:
p = self.protocol
try:
# if the protocol has no clear, we can just tare again
clearCommand = p.clearCommand or p.tareCommand
self.device.write(clearCommand + p.commandTerminator)
time.sleep(p.commandDelay)
except Exception as e:
self.set_status(
'error',
"Could not clear tare on scale %s with protocol %s: %s" %
(self.path_to_scale, p.name, e))
self.device = None
def run(self):
self.device = None
while True:
if self.device:
old_weight = self.weight
self.read_weight()
if self.weight != old_weight:
_logger.info('New Weight: %s, sleeping %ss', self.weight, self.protocol.newWeightDelay)
time.sleep(self.protocol.newWeightDelay)
if self.weight and self.protocol.autoResetWeight:
self.weight = 0
else:
_logger.info('Weight: %s, sleeping %ss', self.weight, self.protocol.weightDelay)
time.sleep(self.protocol.weightDelay)
else:
with self.scalelock:
self.device = self.get_device()
if not self.device:
# retry later to support "plug and play"
time.sleep(10)
scale_thread = None
if serial:
scale_thread = Scale()
hw_proxy.drivers[DRIVER_NAME] = scale_thread
class ScaleDriver(hw_proxy.Proxy):
@http.route('/hw_proxy/scale_read/', type='json', auth='none', cors='*')
def scale_read(self):
if scale_thread:
return {'weight': scale_thread.get_weight(),
'unit': 'kg',
'info': scale_thread.get_weight_info()}
return None
@http.route('/hw_proxy/scale_zero/', type='json', auth='none', cors='*')
def scale_zero(self):
if scale_thread:
scale_thread.set_zero()
return True
@http.route('/hw_proxy/scale_tare/', type='json', auth='none', cors='*')
def scale_tare(self):
if scale_thread:
scale_thread.set_tare()
return True
@http.route('/hw_proxy/scale_clear_tare/', type='json', auth='none', cors='*')
def scale_clear_tare(self):
if scale_thread:
scale_thread.clear_tare()
return True
| agpl-3.0 |
CollabQ/CollabQ | vendor/gdata/spreadsheet/__init__.py | 147 | 17942 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Spreadsheets.
"""
__author__ = '[email protected] (Laura Beth Lincoln)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
import re
import string
# XML namespaces which are often used in Google Spreadsheets entities.
GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006'
GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSPREADSHEETS_EXTENDED_NAMESPACE = ('http://schemas.google.com/spreadsheets'
'/2006/extended')
GSPREADSHEETS_EXTENDED_TEMPLATE = ('{http://schemas.google.com/spreadsheets'
'/2006/extended}%s')
class ColCount(atom.AtomBase):
"""The Google Spreadsheets colCount element """
_tag = 'colCount'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None, extension_elements=None,
extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def ColCountFromString(xml_string):
return atom.CreateClassFromXMLString(ColCount, xml_string)
class RowCount(atom.AtomBase):
"""The Google Spreadsheets rowCount element """
_tag = 'rowCount'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None, extension_elements=None,
extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def RowCountFromString(xml_string):
return atom.CreateClassFromXMLString(RowCount, xml_string)
class Cell(atom.AtomBase):
"""The Google Spreadsheets cell element """
_tag = 'cell'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['row'] = 'row'
_attributes['col'] = 'col'
_attributes['inputValue'] = 'inputValue'
_attributes['numericValue'] = 'numericValue'
def __init__(self, text=None, row=None, col=None, inputValue=None,
numericValue=None, extension_elements=None, extension_attributes=None):
self.text = text
self.row = row
self.col = col
self.inputValue = inputValue
self.numericValue = numericValue
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def CellFromString(xml_string):
return atom.CreateClassFromXMLString(Cell, xml_string)
class Custom(atom.AtomBase):
"""The Google Spreadsheets custom element"""
_namespace = GSPREADSHEETS_EXTENDED_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, column=None, text=None, extension_elements=None,
extension_attributes=None):
self.column = column # The name of the column
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def _BecomeChildElement(self, tree):
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = '{%s}%s' % (self.__class__._namespace,
self.column)
self._AddMembersToElementTree(new_child)
def _ToElementTree(self):
new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
self.column))
self._AddMembersToElementTree(new_tree)
return new_tree
def _HarvestElementTree(self, tree):
namespace_uri, local_tag = string.split(tree.tag[1:], "}", 1)
self.column = local_tag
# Fill in the instance members from the contents of the XML tree.
for child in tree:
self._ConvertElementTreeToMember(child)
for attribute, value in tree.attrib.iteritems():
self._ConvertElementAttributeToMember(attribute, value)
self.text = tree.text
def CustomFromString(xml_string):
element_tree = ElementTree.fromstring(xml_string)
return _CustomFromElementTree(element_tree)
def _CustomFromElementTree(element_tree):
namespace_uri, local_tag = string.split(element_tree.tag[1:], "}", 1)
if namespace_uri == GSPREADSHEETS_EXTENDED_NAMESPACE:
new_custom = Custom()
new_custom._HarvestElementTree(element_tree)
new_custom.column = local_tag
return new_custom
return None
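# Example (illustrative, not part of the library; the 'gsx' prefix is just the
# conventional one for the extended namespace): a list-feed column element like
#
#   <gsx:hours xmlns:gsx="http://schemas.google.com/spreadsheets/2006/extended">40</gsx:hours>
#
# parses via CustomFromString() into a Custom instance with column='hours' and
# text='40'.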
class SpreadsheetsSpreadsheet(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a Spreadsheet Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsSpreadsheetFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheet,
xml_string)
class SpreadsheetsWorksheet(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a Worksheet Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count',
RowCount)
_children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count',
ColCount)
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
row_count=None, col_count=None, text=None, extension_elements=None,
extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.row_count = row_count
self.col_count = col_count
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsWorksheetFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsWorksheet,
xml_string)
class SpreadsheetsCell(gdata.BatchEntry):
"""A Google Spreadsheets flavor of a Cell Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.BatchEntry._children.copy()
_attributes = gdata.BatchEntry._attributes.copy()
_children['{%s}cell' % GSPREADSHEETS_NAMESPACE] = ('cell', Cell)
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
cell=None, batch_operation=None, batch_id=None, batch_status=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.batch_operation = batch_operation
self.batch_id = batch_id
self.batch_status = batch_status
self.updated = updated
self.cell = cell
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsCellFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsCell,
xml_string)
class SpreadsheetsList(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a List Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
custom=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.custom = custom or {}
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
# We need to overwrite _ConvertElementTreeToMember to add special logic to
# convert custom attributes to members
def _ConvertElementTreeToMember(self, child_tree):
# Find the element's tag in this class's list of child members
if self.__class__._children.has_key(child_tree.tag):
member_name = self.__class__._children[child_tree.tag][0]
member_class = self.__class__._children[child_tree.tag][1]
# If the class member is supposed to contain a list, make sure the
# matching member is set to a list, then append the new member
# instance to the list.
if isinstance(member_class, list):
if getattr(self, member_name) is None:
setattr(self, member_name, [])
getattr(self, member_name).append(atom._CreateClassFromElementTree(
member_class[0], child_tree))
else:
setattr(self, member_name,
atom._CreateClassFromElementTree(member_class, child_tree))
elif child_tree.tag.find('{%s}' % GSPREADSHEETS_EXTENDED_NAMESPACE) == 0:
      # If this is in the custom namespace, add it to the custom dict.
name = child_tree.tag[child_tree.tag.index('}')+1:]
custom = _CustomFromElementTree(child_tree)
if custom:
self.custom[name] = custom
else:
      atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)
  # We need to overwrite _AddMembersToElementTree to add special logic to
# convert custom members to XML nodes.
def _AddMembersToElementTree(self, tree):
# Convert the members of this class which are XML child nodes.
# This uses the class's _children dictionary to find the members which
# should become XML child nodes.
member_node_names = [values[0] for tag, values in
self.__class__._children.iteritems()]
for member_name in member_node_names:
member = getattr(self, member_name)
if member is None:
pass
elif isinstance(member, list):
for instance in member:
instance._BecomeChildElement(tree)
else:
member._BecomeChildElement(tree)
# Convert the members of this class which are XML attributes.
for xml_attribute, member_name in self.__class__._attributes.iteritems():
member = getattr(self, member_name)
if member is not None:
tree.attrib[xml_attribute] = member
# Convert all special custom item attributes to nodes
for name, custom in self.custom.iteritems():
custom._BecomeChildElement(tree)
# Lastly, call the ExtensionContainers's _AddMembersToElementTree to
# convert any extension attributes.
atom.ExtensionContainer._AddMembersToElementTree(self, tree)
def SpreadsheetsListFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsList,
xml_string)
class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed):
"""A feed containing Google Spreadsheets Spreadsheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsSpreadsheet])
def SpreadsheetsSpreadsheetsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheetsFeed,
xml_string)
class SpreadsheetsWorksheetsFeed(gdata.GDataFeed):
"""A feed containing Google Spreadsheets Spreadsheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsWorksheet])
def SpreadsheetsWorksheetsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsWorksheetsFeed,
xml_string)
class SpreadsheetsCellsFeed(gdata.BatchFeed):
"""A feed containing Google Spreadsheets Cells"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.BatchFeed._children.copy()
_attributes = gdata.BatchFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsCell])
_children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count',
RowCount)
_children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count',
ColCount)
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, extension_elements=None,
extension_attributes=None, text=None, row_count=None,
col_count=None, interrupted=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text, interrupted=interrupted)
self.row_count = row_count
self.col_count = col_count
def GetBatchLink(self):
for link in self.link:
if link.rel == 'http://schemas.google.com/g/2005#batch':
return link
return None
def SpreadsheetsCellsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsCellsFeed,
xml_string)
class SpreadsheetsListFeed(gdata.GDataFeed):
"""A feed containing Google Spreadsheets Spreadsheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsList])
def SpreadsheetsListFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsListFeed,
xml_string)
| apache-2.0 |
pku9104038/edx-platform | lms/djangoapps/courseware/management/commands/export_course.py | 7 | 2593 | """
A Django command that exports a course to a tar.gz file.
If <filename> is '-', it pipes the file to stdout
"""
import os
import shutil
import tarfile
from tempfile import mktemp, mkdtemp
from textwrap import dedent
from path import path
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.xml_exporter import export_to_xml
class Command(BaseCommand):
"""
Export a course to XML. The output is compressed as a tar.gz file
"""
args = "<course_id> <output_filename>"
help = dedent(__doc__).strip()
def handle(self, *args, **options):
course_id, filename, pipe_results = self._parse_arguments(args)
export_course_to_tarfile(course_id, filename)
results = self._get_results(filename) if pipe_results else None
return results
def _parse_arguments(self, args):
"""Parse command line arguments"""
try:
course_id = args[0]
filename = args[1]
except IndexError:
raise CommandError("Insufficient arguments")
# If filename is '-' save to a temp file
pipe_results = False
if filename == '-':
filename = mktemp()
pipe_results = True
return course_id, filename, pipe_results
def _get_results(self, filename):
"""Load results from file"""
results = None
with open(filename) as f:
results = f.read()
os.remove(filename)
return results
def export_course_to_tarfile(course_id, filename):
"""Exports a course into a tar.gz file"""
tmp_dir = mkdtemp()
try:
course_dir = export_course_to_directory(course_id, tmp_dir)
compress_directory(course_dir, filename)
finally:
shutil.rmtree(tmp_dir)
def export_course_to_directory(course_id, root_dir):
"""Export course into a directory"""
store = modulestore()
course = store.get_course(course_id)
if course is None:
raise CommandError("Invalid course_id")
course_name = course.location.course_id.replace('/', '-')
export_to_xml(store, None, course.location, root_dir, course_name)
course_dir = path(root_dir) / course_name
return course_dir
def compress_directory(directory, filename):
"""Compress a directrory into a tar.gz file"""
mode = 'w:gz'
name = path(directory).name
with tarfile.open(filename, mode) as tar_file:
tar_file.add(directory, arcname=name)
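# Usage sketch (illustrative; the course id and paths are placeholders, and the
# exact manage.py wrapper depends on the edx-platform setup):
#
#   python manage.py export_course MyOrg/MyCourse/2014 /tmp/mycourse.tar.gz
#
# or, to stream the archive to stdout instead of writing a file:
#
#   python manage.py export_course MyOrg/MyCourse/2014 - > mycourse.tar.gz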
| agpl-3.0 |
fanne/june | june/forms/node.py | 11 | 1204 | # coding: utf-8
from wtforms import TextField, TextAreaField, SelectField, BooleanField
from wtforms.validators import DataRequired
from flask.ext.babel import lazy_gettext as _
from ._base import BaseForm
from ..models import Node
class NodeForm(BaseForm):
title = TextField(
_('Title'), validators=[DataRequired()],
description=_('The screen title of the node')
)
urlname = TextField(
_('URL'), validators=[DataRequired()],
description=_('The url name of the node')
)
description = TextAreaField(_('Description'))
role = SelectField(
_('Role'),
description=_('Required role'),
choices=[
('user', _('User')),
('staff', _('Staff')),
('admin', _('Admin'))
],
default='user',
)
on_home = BooleanField(_('Show on home page'), default=True)
def validate_urlname(self, field):
if self._obj and self._obj.urlname == field.data:
return
if Node.query.filter_by(urlname=field.data).count():
raise ValueError(_('The node exists'))
def save(self):
node = Node(**self.data)
node.save()
return node
| bsd-3-clause |
justajeffy/arsenalsuite | cpp/apps/bach/data_export/FixCachedKeywords.py | 10 | 7224 | #!/usr/bin/env python2.5
#
# Copyright (c) 2009 Dr. D Studios. (Please refer to license for details)
# SVN_META_HEADURL = "$HeadURL: $"
# SVN_META_ID = "$Id: FixCachedKeywords.py 9408 2010-03-03 22:35:49Z brobison $"
#
import sys
import os
from PyQt4.QtSql import *
#-----------------------------------------------------------------------------
class FixCachedKeywords:
#-----------------------------------------------------------------------------
def __init__( self, parent ):
self.parent = parent
self.pgAssetName2Id = {}
self.pgAssetId2Name = {}
self.pgKeywordName2Id = {}
self.pgKeywordId2Name = {}
self.pgKeywordMapping = {}
self._pgdb = QSqlDatabase.addDatabase( "QPSQL", "pgDB" )
self._pgdb.setDatabaseName( "bach" )
self._pgdb.setHostName( "sql01" )
self._pgdb.setUserName( "bach" )
self._pgdb.setPassword( "escher" )
if not self._pgdb.open():
self.p( "Couldn't open Bach DB" )
            return  # __init__ must return None; just bail out when the DB is unavailable
self.p( "Opened Bach DB" )
self.dryRun = True
self.dryRun = False
self.fout = file( 'fixKeyword.bach.sql', 'wt' )
self.collectPGData_Keyword()
idx = 0
for k in self.pgKeywordMapping:
keywords = ','.join( self.pgKeywordMapping[ k ] )
s = "UPDATE bachasset SET cachedkeywords='%s' WHERE keybachasset=%d;" % ( esc( keywords ), k )
self._doPGSqlMod( s )
            print idx, len( self.pgKeywordMapping), s
            idx += 1
#-----------------------------------------------------------------------------
def p( self, p ):
self.parent.printIt( p )
#-----------------------------------------------------------------------------
def pS( self, p ):
self.parent.stat( p )
#-----------------------------------------------------------------------------
def _doPGSql( self, query ):
# self.p( '>>> Executing: [Bach] [%s]' % query )
q = QSqlQuery( query, self._pgdb )
#self.p( '<<< Done' )
return q
#-----------------------------------------------------------------------------
def _doPGSqlMod( self, query ):
self.fout.write( query )
self.fout.write( '\n' )
if self.dryRun:
return
#self.p( '>>> Executing: [Bach] [%s]' % query )
q = QSqlQuery( query, self._pgdb )
#self.p( '<<< Done' )
return q
#-----------------------------------------------------------------------------
def collectPGData_Asset(self):
q = self._doPGSql("""SELECT path, keybachasset FROM bachasset""")
while(q.next()):
name, id = extractPGAsset( q )
self.pgAssetName2Id[ name ] = id
self.pgAssetId2Name[ id ] = name
#-----------------------------------------------------------------------------
def collectPGData_Keyword(self):
q = self._doPGSql("""SELECT keybachasset, name FROM
bachkeywordmap, bachasset, bachkeyword
WHERE
fkeybachasset=keybachasset AND
fkeybachkeyword=keybachkeyword""")
while(q.next()):
d = extractPGKeywordMapping( q )
id = d[ 0 ]
name = d[ 1 ]
            # create the list on first sight, then append -- avoids adding the
            # first keyword twice
            if not id in self.pgKeywordMapping:
                self.pgKeywordMapping[ id ] = []
            self.pgKeywordMapping[ id ].append( name )
#-----------------------------------------------------------------------------
def collectPGData(self):
self.p( "Preloading Bach data..." )
#----------------
self.collectPGData_Asset()
self.collectPGData_Keyword()
#----------------
self.p( "... finished" )
#-----------------------------------------------------------------------------
def assetExists(self, path):
if not path in self.pgAssetName2Id:
return 0
return self.pgAssetName2Id[ path ]
#-----------------------------------------------------------------------------
def getAssetId( self, path ):
return self.assetExists( path )
#-----------------------------------------------------------------------------
def keywordExists(self, name):
if not name in self.pgKeywordName2Id:
return 0
return self.pgKeywordName2Id[ name ]
#-----------------------------------------------------------------------------
def getKeywordId( self, name ):
return self.keywordExists( name )
#-----------------------------------------------------------------------------
def keywordMapExists(self, imgPath, keywordName):
if not imgPath in self.pgKeywordMapping:
return False
if not keywordName in self.pgKeywordMapping[ imgPath ]:
return False
return True
#-----------------------------------------------------------------------------
def collectionExists(self, name):
if not name in self.pgCollectionName2Id:
return 0
return self.pgCollectionName2Id[ name ]
#-----------------------------------------------------------------------------
def getCollectionId( self, name ):
return self.collectionExists( name )
#-----------------------------------------------------------------------------
def collectionMapExists(self, imgPath, collectionName):
if not imgPath in self.pgCollectionMapping:
return False
if not collectionName in self.pgCollectionMapping[ imgPath ]:
return False
return True
#-----------------------------------------------------------------------------
def esc( s ):
s = s.replace( '\'', '\'\'' )
return s
#-----------------------------------------------------------------------------
def toS( variant ):
v = variant.toString()
return str( v.toAscii() )
#-----------------------------------------------------------------------------
def toI( variant ):
v, ok = variant.toInt()
return int( v )
#-----------------------------------------------------------------------------
def extractPGAsset( query ):
name = toS( query.value( 0 ) )
id = toI( query.value( 1 ) )
return name, id
#-----------------------------------------------------------------------------
def extractPGKeyword( query ):
name = toS( query.value( 0 ) )
id = toI( query.value( 1 ) )
return name, id
#-----------------------------------------------------------------------------
def extractPGCollection( query ):
name = toS( query.value( 0 ) )
id = toI( query.value( 1 ) )
return name, id
#-----------------------------------------------------------------------------
def extractPGCollectionMapping( query ):
d = []
d.append( toI( query.value(0) ) )
d.append( toS( query.value(1) ) )
return d
#-----------------------------------------------------------------------------
def extractPGKeywordMapping( query ):
d = []
d.append( toI( query.value(0) ) )
d.append( toS( query.value(1) ) )
return d
#-----------------------------------------------------------------------------
class Printer():
def printIt(self,p):
print p
if __name__=='__main__':
printer = Printer()
fixit = FixCachedKeywords( printer )
| gpl-2.0 |
manastech/de-bee | gdata/Crypto/Cipher/__init__.py | 271 | 1145 | """Secret-key encryption algorithms.
Secret-key encryption algorithms transform plaintext in some way that
is dependent on a key, producing ciphertext. This transformation can
easily be reversed, if (and, hopefully, only if) one knows the key.
The encryption modules here all support the interface described in PEP
272, "API for Block Encryption Algorithms".
If you don't know which algorithm to choose, use AES because it's
standard and has undergone a fair bit of examination.
Crypto.Cipher.AES Advanced Encryption Standard
Crypto.Cipher.ARC2 Alleged RC2
Crypto.Cipher.ARC4 Alleged RC4
Crypto.Cipher.Blowfish
Crypto.Cipher.CAST
Crypto.Cipher.DES The Data Encryption Standard. Very commonly used
in the past, but today its 56-bit keys are too small.
Crypto.Cipher.DES3 Triple DES.
Crypto.Cipher.IDEA
Crypto.Cipher.RC5
Crypto.Cipher.XOR The simple XOR cipher.
"""
__all__ = ['AES', 'ARC2', 'ARC4',
'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5',
'XOR'
]
__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $"
| mit |
jordiclariana/ansible | lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py | 48 | 3696 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: iam_mfa_device_facts
short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
description:
- List the MFA (Multi-Factor Authentication) devices registered for a user
version_added: "2.2"
author: Victor Costan (@pwnall)
options:
user_name:
description:
- The name of the user whose MFA devices will be listed
required: false
default: null
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
'''
RETURN = """
mfa_devices:
description: The MFA devices registered for the given user
returned: always
type: list
sample:
- enable_date: "2016-03-11T23:25:36+00:00"
serial_number: arn:aws:iam::085120003701:mfa/pwnall
user_name: pwnall
- enable_date: "2016-03-11T23:25:37+00:00"
serial_number: arn:aws:iam::085120003702:mfa/pwnall
user_name: pwnall
"""
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# List MFA devices (more details: http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html)
iam_mfa_device_facts:
register: mfa_devices
# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
sts_assume_role:
mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
role_arn: "arn:aws:iam::123456789012:role/someRole"
role_session_name: "someRoleSession"
register: assumed_role
'''
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def list_mfa_devices(connection, module):
user_name = module.params.get('user_name')
changed = False
args = {}
if user_name is not None:
args['UserName'] = user_name
try:
response = connection.list_mfa_devices(**args)
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
user_name=dict(required=False, default=None)
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_kwargs)
else:
module.fail_json(msg="region must be specified")
list_mfa_devices(connection, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
crobinso/pkgdb2 | pkgdb2/ui/collections.py | 4 | 6211 | # -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
UI namespace for the Flask application.
'''
import flask
from math import ceil
import pkgdb2.forms
import pkgdb2.lib as pkgdblib
from pkgdb2 import SESSION, APP, is_admin
from pkgdb2.ui import UI
## Some of the object we use here have inherited methods which apparently
## pylint does not detect.
# pylint: disable=E1101
@UI.route('/collections/')
@UI.route('/collections/page/<int:page>/')
@UI.route('/collections/<motif>/')
@UI.route('/collections/<motif>/page/<int:page>/')
def list_collections(motif=None, page=1):
''' Display the list of collections corresponding to the motif. '''
pattern = flask.request.args.get('motif', motif) or '*'
limit = flask.request.args.get('limit', APP.config['ITEMS_PER_PAGE'])
try:
limit = abs(int(limit))
except ValueError:
limit = APP.config['ITEMS_PER_PAGE']
flask.flash('Incorrect limit provided, using default', 'errors')
collections = pkgdblib.search_collection(
SESSION,
pattern=pattern,
page=page,
limit=limit,
)
collections_count = pkgdblib.search_collection(
SESSION,
pattern=pattern,
page=page,
limit=limit,
count=True
)
total_page = int(ceil(collections_count / float(limit)))
return flask.render_template(
'list_collections.html',
collections=collections,
motif=motif,
total_page=total_page,
page=page
)
@UI.route('/collection/<collection>/')
def collection_info(collection):
''' Display the information about the specified collection. '''
try:
collection = pkgdblib.search_collection(SESSION, collection)[0]
except IndexError:
flask.flash('No collection of this name found.', 'errors')
return flask.render_template('msg.html')
return flask.render_template(
'collection.html',
collection=collection,
)
@UI.route('/collection/<collection>/edit', methods=('GET', 'POST'))
@is_admin
def collection_edit(collection):
    ''' Allow editing of the information about the specified collection. '''
try:
collection = pkgdblib.search_collection(SESSION, collection)[0]
except IndexError:
flask.flash('No collection of this name found.', 'errors')
return flask.render_template('msg.html')
clt_status = pkgdblib.get_status(SESSION, 'clt_status')['clt_status']
form = pkgdb2.forms.AddCollectionForm(
clt_status=clt_status
)
if form.validate_on_submit():
clt_name = form.clt_name.data
clt_version = form.version.data
clt_status = form.clt_status.data
clt_branchname = form.branchname.data
clt_disttag = form.dist_tag.data
clt_koji_name = form.kojiname.data
try:
pkgdblib.edit_collection(
SESSION,
collection=collection,
clt_name=clt_name,
clt_version=clt_version,
clt_status=clt_status,
clt_branchname=clt_branchname,
clt_disttag=clt_disttag,
clt_koji_name=clt_koji_name,
user=flask.g.fas_user,
)
SESSION.commit()
flask.flash('Collection "%s" edited' % clt_branchname)
return flask.redirect(flask.url_for(
'.collection_info', collection=collection.branchname))
# In theory we should never hit this
except pkgdblib.PkgdbException, err: # pragma: no cover
SESSION.rollback()
flask.flash(str(err), 'errors')
elif flask.request.method == 'GET':
form = pkgdb2.forms.AddCollectionForm(
clt_status=clt_status,
collection=collection
)
return flask.render_template(
'collection_edit.html',
form=form,
collection=collection,
)
@UI.route('/new/collection/', methods=('GET', 'POST'))
@is_admin
def collection_new():
''' Page to create a new collection. '''
clt_status = pkgdblib.get_status(SESSION, 'clt_status')['clt_status']
form = pkgdb2.forms.AddCollectionForm(clt_status=clt_status)
if form.validate_on_submit():
clt_name = form.clt_name.data
clt_version = form.version.data
clt_status = form.clt_status.data
clt_branchname = form.branchname.data
clt_disttag = form.dist_tag.data
clt_koji_name = form.kojiname.data
try:
message = pkgdblib.add_collection(
SESSION,
clt_name=clt_name,
clt_version=clt_version,
clt_status=clt_status,
clt_branchname=clt_branchname,
clt_disttag=clt_disttag,
clt_koji_name=clt_koji_name,
user=flask.g.fas_user,
)
SESSION.commit()
flask.flash(message)
return flask.redirect(flask.url_for('.list_collections'))
# In theory we should never hit this
except pkgdblib.PkgdbException, err: # pragma: no cover
SESSION.rollback()
flask.flash(str(err), 'errors')
return flask.render_template(
'collection_new.html',
form=form,
)
| gpl-2.0 |
rghe/ansible | test/units/modules/network/nxos/test_nxos_bgp_neighbor_af.py | 18 | 4920 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp_neighbor_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpNeighborAfModule(TestNxosModule):
module = nxos_bgp_neighbor_af
def setUp(self):
super(TestNxosBgpNeighborAfModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor_af.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor_af.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosBgpNeighborAfModule, self).tearDown()
self.mock_load_config.stop()
self.mock_get_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
self.load_config.return_value = []
def test_nxos_bgp_neighbor_af(self):
set_module_args(dict(asn=65535, neighbor='192.0.2.3', afi='ipv4',
safi='unicast', route_reflector_client=True))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], [
'router bgp 65535', 'neighbor 192.0.2.3', 'address-family ipv4 unicast',
'route-reflector-client'
])
def test_nxos_bgp_neighbor_af_exists(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast'))
self.execute_module(changed=False, commands=[])
def test_nxos_bgp_neighbor_af_absent(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast', state='absent'))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'no address-family ipv4 unicast']
)
def test_nxos_bgp_neighbor_af_advertise_map(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast',
advertise_map_exist=['my_advertise_map', 'my_exist_map']))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'advertise-map my_advertise_map exist-map my_exist_map']
)
def test_nxos_bgp_neighbor_af_advertise_map_non_exist(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4', safi='unicast',
advertise_map_non_exist=['my_advertise_map', 'my_non_exist_map']))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'advertise-map my_advertise_map non-exist-map my_non_exist_map']
)
def test_nxos_bgp_neighbor_af_max_prefix_limit_default(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4',
safi='unicast', max_prefix_limit='default'))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'no maximum-prefix']
)
def test_nxos_bgp_neighbor_af_max_prefix(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4',
safi='unicast', max_prefix_threshold=20,
max_prefix_limit=20))
self.execute_module(
changed=True, sort=False,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'maximum-prefix 20 20']
)
def test_nxos_bgp_neighbor_af_disable_peer_as_check(self):
set_module_args(dict(asn=65535, neighbor='3.3.3.5', afi='ipv4',
safi='unicast', disable_peer_as_check=True))
self.execute_module(
changed=True,
commands=['router bgp 65535', 'neighbor 3.3.3.5', 'address-family ipv4 unicast', 'disable-peer-as-check']
)
| gpl-3.0 |
stweil/letsencrypt | acme/acme/fields.py | 53 | 1742 | """ACME JSON fields."""
import logging
import pyrfc3339
from acme import jose
logger = logging.getLogger(__name__)
class Fixed(jose.Field):
"""Fixed field."""
def __init__(self, json_name, value):
self.value = value
super(Fixed, self).__init__(
json_name=json_name, default=value, omitempty=False)
def decode(self, value):
if value != self.value:
raise jose.DeserializationError('Expected {0!r}'.format(self.value))
return self.value
def encode(self, value):
if value != self.value:
logger.warn(
'Overriding fixed field (%s) with %r', self.json_name, value)
return value
class RFC3339Field(jose.Field):
"""RFC3339 field encoder/decoder.
Handles decoding/encoding between RFC3339 strings and aware (not
naive) `datetime.datetime` objects
(e.g. ``datetime.datetime.now(pytz.utc)``).
"""
@classmethod
def default_encoder(cls, value):
return pyrfc3339.generate(value)
@classmethod
def default_decoder(cls, value):
try:
return pyrfc3339.parse(value)
except ValueError as error:
raise jose.DeserializationError(error)
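# Round-trip sketch (illustrative, not part of the module): for an aware
# datetime such as datetime.datetime(2015, 3, 27, tzinfo=pytz.utc),
# default_encoder() yields '2015-03-27T00:00:00Z' and default_decoder() turns
# that string back into the same aware datetime.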
class Resource(jose.Field):
"""Resource MITM field."""
def __init__(self, resource_type, *args, **kwargs):
self.resource_type = resource_type
super(Resource, self).__init__(
'resource', default=resource_type, *args, **kwargs)
def decode(self, value):
if value != self.resource_type:
raise jose.DeserializationError(
'Wrong resource type: {0} instead of {1}'.format(
value, self.resource_type))
return value
| apache-2.0 |
cernops/nova | nova/tests/unit/console/test_websocketproxy.py | 10 | 13858 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for nova websocketproxy."""
import mock
from nova.console import websocketproxy
from nova import exception
from nova import test
class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase):
def setUp(self):
super(NovaProxyRequestHandlerBaseTestCase, self).setUp()
self.flags(console_allowed_origins = ['allowed-origin-example-1.net',
'allowed-origin-example-2.net'])
self.wh = websocketproxy.NovaProxyRequestHandlerBase()
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
def _fake_getheader(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_ipv6(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://[2001:db8::1]:6080'
elif header == 'Host':
return '[2001:db8::1]:6080'
else:
return
def _fake_getheader_bad_token(self, header):
if header == 'cookie':
return 'token="XXX"'
elif header == 'Origin':
return 'https://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_bad_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://bad-origin-example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_allowed_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://allowed-origin-example-2.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_blank_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return ''
elif header == 'Host':
return 'example.net:6080'
else:
return
def _fake_getheader_no_origin(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return None
elif header == 'Host':
return 'any-example.net:6080'
else:
return
def _fake_getheader_http(self, header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'http://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_ipv6_url(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://[2001:db8::1]:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://[2001:db8::1]/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader_ipv6
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_token_invalid(self, check_token):
check_token.return_value = False
self.wh.path = "http://127.0.0.1/?token=XXX"
self.wh.headers.getheader = self._fake_getheader_bad_token
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
check_token.assert_called_with(mock.ANY, token="XXX")
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_internal_access_path(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'internal_access_path': 'vmid',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with(tsock)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_internal_access_path_err(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'internal_access_path': 'xxx',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.InvalidConnectionInfo,
self.wh.new_websocket_client)
check_token.assert_called_with(mock.ANY, token="123-456-789")
@mock.patch('sys.version_info')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_py273_good_scheme(
self, check_token, version_info):
version_info.return_value = (2, 7, 3)
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('sys.version_info')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_py273_special_scheme(
self, check_token, version_info):
version_info.return_value = (2, 7, 3)
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "ws://127.0.0.1/?token=123-456-789"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.NovaException,
self.wh.new_websocket_client)
@mock.patch('socket.getfqdn')
def test_address_string_doesnt_do_reverse_dns_lookup(self, getfqdn):
request_mock = mock.MagicMock()
request_mock.makefile().readline.side_effect = [
'GET /vnc.html?token=123-456-789 HTTP/1.1\r\n',
''
]
server_mock = mock.MagicMock()
client_address = ('8.8.8.8', 54321)
handler = websocketproxy.NovaProxyRequestHandler(
request_mock, client_address, server_mock)
handler.log_message('log message using client address context info')
self.assertFalse(getfqdn.called) # no reverse dns look up
self.assertEqual(handler.address_string(), '8.8.8.8') # plain address
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_bad_origin_header(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_bad_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_allowed_origin_header(self,
check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'https://example.net:6080'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_allowed_origin
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_blank_origin_header(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_blank_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_no_origin_header(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader_no_origin
self.wh.new_websocket_client()
check_token.assert_called_with(mock.ANY, token="123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_https_origin_proto_http(self,
check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url': 'http://example.net:6080'
}
self.wh.path = "https://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_https_origin_proto_ws(self,
check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'serial',
'access_url': 'ws://example.net:6080'
}
self.wh.path = "https://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.consoleauth.rpcapi.ConsoleAuthAPI.check_token')
def test_new_websocket_client_novnc_bad_console_type(self, check_token):
check_token.return_value = {
'host': 'node1',
'port': '10000',
'console_type': 'bad-console-type'
}
self.wh.path = "http://127.0.0.1/"
self.wh.headers.getheader = self._fake_getheader
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
| apache-2.0 |
spacy-io/spaCy | spacy/displacy/render.py | 2 | 13219 | from typing import Dict, Any, List, Optional, Union
import uuid
from .templates import TPL_DEP_SVG, TPL_DEP_WORDS, TPL_DEP_WORDS_LEMMA, TPL_DEP_ARCS
from .templates import TPL_ENT, TPL_ENT_RTL, TPL_FIGURE, TPL_TITLE, TPL_PAGE
from .templates import TPL_ENTS
from ..util import minify_html, escape_html, registry
from ..errors import Errors
DEFAULT_LANG = "en"
DEFAULT_DIR = "ltr"
DEFAULT_ENTITY_COLOR = "#ddd"
DEFAULT_LABEL_COLORS = {
"ORG": "#7aecec",
"PRODUCT": "#bfeeb7",
"GPE": "#feca74",
"LOC": "#ff9561",
"PERSON": "#aa9cfc",
"NORP": "#c887fb",
"FACILITY": "#9cc9cc",
"EVENT": "#ffeb80",
"LAW": "#ff8197",
"LANGUAGE": "#ff8197",
"WORK_OF_ART": "#f0d0ff",
"DATE": "#bfe1d9",
"TIME": "#bfe1d9",
"MONEY": "#e4e7d2",
"QUANTITY": "#e4e7d2",
"ORDINAL": "#e4e7d2",
"CARDINAL": "#e4e7d2",
"PERCENT": "#e4e7d2",
}
class DependencyRenderer:
"""Render dependency parses as SVGs."""
style = "dep"
def __init__(self, options: Dict[str, Any] = {}) -> None:
"""Initialise dependency renderer.
options (dict): Visualiser-specific options (compact, word_spacing,
arrow_spacing, arrow_width, arrow_stroke, distance, offset_x,
color, bg, font)
"""
self.compact = options.get("compact", False)
self.word_spacing = options.get("word_spacing", 45)
self.arrow_spacing = options.get("arrow_spacing", 12 if self.compact else 20)
self.arrow_width = options.get("arrow_width", 6 if self.compact else 10)
self.arrow_stroke = options.get("arrow_stroke", 2)
self.distance = options.get("distance", 150 if self.compact else 175)
self.offset_x = options.get("offset_x", 50)
self.color = options.get("color", "#000000")
self.bg = options.get("bg", "#ffffff")
self.font = options.get("font", "Arial")
self.direction = DEFAULT_DIR
self.lang = DEFAULT_LANG
def render(
self, parsed: List[Dict[str, Any]], page: bool = False, minify: bool = False
) -> str:
"""Render complete markup.
parsed (list): Dependency parses to render.
page (bool): Render parses wrapped as full HTML page.
minify (bool): Minify HTML markup.
RETURNS (str): Rendered SVG or HTML markup.
"""
# Create a random ID prefix to make sure parses don't receive the
# same ID, even if they're identical
id_prefix = uuid.uuid4().hex
rendered = []
for i, p in enumerate(parsed):
if i == 0:
settings = p.get("settings", {})
self.direction = settings.get("direction", DEFAULT_DIR)
self.lang = settings.get("lang", DEFAULT_LANG)
render_id = f"{id_prefix}-{i}"
svg = self.render_svg(render_id, p["words"], p["arcs"])
rendered.append(svg)
if page:
content = "".join([TPL_FIGURE.format(content=svg) for svg in rendered])
markup = TPL_PAGE.format(
content=content, lang=self.lang, dir=self.direction
)
else:
markup = "".join(rendered)
if minify:
return minify_html(markup)
return markup
def render_svg(
self,
render_id: Union[int, str],
words: List[Dict[str, Any]],
arcs: List[Dict[str, Any]],
) -> str:
"""Render SVG.
render_id (Union[int, str]): Unique ID, typically index of document.
words (list): Individual words and their tags.
arcs (list): Individual arcs and their start, end, direction and label.
RETURNS (str): Rendered SVG markup.
"""
self.levels = self.get_levels(arcs)
self.highest_level = len(self.levels)
self.offset_y = self.distance / 2 * self.highest_level + self.arrow_stroke
self.width = self.offset_x + len(words) * self.distance
self.height = self.offset_y + 3 * self.word_spacing
self.id = render_id
words_svg = [
self.render_word(w["text"], w["tag"], w.get("lemma", None), i)
for i, w in enumerate(words)
]
arcs_svg = [
self.render_arrow(a["label"], a["start"], a["end"], a["dir"], i)
for i, a in enumerate(arcs)
]
content = "".join(words_svg) + "".join(arcs_svg)
return TPL_DEP_SVG.format(
id=self.id,
width=self.width,
height=self.height,
color=self.color,
bg=self.bg,
font=self.font,
content=content,
dir=self.direction,
lang=self.lang,
)
def render_word(self, text: str, tag: str, lemma: str, i: int) -> str:
"""Render individual word.
text (str): Word text.
tag (str): Part-of-speech tag.
        lemma (str / None): Lemma displayed under the word, if provided.
        i (int): Unique ID, typically word index.
RETURNS (str): Rendered SVG markup.
"""
y = self.offset_y + self.word_spacing
x = self.offset_x + i * self.distance
if self.direction == "rtl":
x = self.width - x
html_text = escape_html(text)
if lemma is not None:
return TPL_DEP_WORDS_LEMMA.format(
text=html_text, tag=tag, lemma=lemma, x=x, y=y
)
return TPL_DEP_WORDS.format(text=html_text, tag=tag, x=x, y=y)
def render_arrow(
self, label: str, start: int, end: int, direction: str, i: int
) -> str:
"""Render individual arrow.
label (str): Dependency label.
start (int): Index of start word.
end (int): Index of end word.
direction (str): Arrow direction, 'left' or 'right'.
i (int): Unique ID, typically arrow index.
RETURNS (str): Rendered SVG markup.
"""
if start < 0 or end < 0:
error_args = dict(start=start, end=end, label=label, dir=direction)
raise ValueError(Errors.E157.format(**error_args))
level = self.levels.index(end - start) + 1
x_start = self.offset_x + start * self.distance + self.arrow_spacing
if self.direction == "rtl":
x_start = self.width - x_start
y = self.offset_y
x_end = (
self.offset_x
+ (end - start) * self.distance
+ start * self.distance
- self.arrow_spacing * (self.highest_level - level) / 4
)
if self.direction == "rtl":
x_end = self.width - x_end
y_curve = self.offset_y - level * self.distance / 2
if self.compact:
y_curve = self.offset_y - level * self.distance / 6
if y_curve == 0 and len(self.levels) > 5:
y_curve = -self.distance
arrowhead = self.get_arrowhead(direction, x_start, y, x_end)
arc = self.get_arc(x_start, y, y_curve, x_end)
label_side = "right" if self.direction == "rtl" else "left"
return TPL_DEP_ARCS.format(
id=self.id,
i=i,
stroke=self.arrow_stroke,
head=arrowhead,
label=label,
label_side=label_side,
arc=arc,
)
def get_arc(self, x_start: int, y: int, y_curve: int, x_end: int) -> str:
"""Render individual arc.
x_start (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
        y_curve (int): Y-coordinate of Cubic Bézier y_curve point.
x_end (int): X-coordinate of arrow end point.
RETURNS (str): Definition of the arc path ('d' attribute).
"""
template = "M{x},{y} C{x},{c} {e},{c} {e},{y}"
if self.compact:
template = "M{x},{y} {x},{c} {e},{c} {e},{y}"
return template.format(x=x_start, y=y, c=y_curve, e=x_end)
def get_arrowhead(self, direction: str, x: int, y: int, end: int) -> str:
"""Render individual arrow head.
direction (str): Arrow direction, 'left' or 'right'.
x (int): X-coordinate of arrow start point.
y (int): Y-coordinate of arrow start and end point.
end (int): X-coordinate of arrow end point.
RETURNS (str): Definition of the arrow head path ('d' attribute).
"""
if direction == "left":
p1, p2, p3 = (x, x - self.arrow_width + 2, x + self.arrow_width - 2)
else:
p1, p2, p3 = (end, end + self.arrow_width - 2, end - self.arrow_width + 2)
return f"M{p1},{y + 2} L{p2},{y - self.arrow_width} {p3},{y - self.arrow_width}"
def get_levels(self, arcs: List[Dict[str, Any]]) -> List[int]:
"""Calculate available arc height "levels".
Used to calculate arrow heights dynamically and without wasting space.
args (list): Individual arcs and their start, end, direction and label.
RETURNS (list): Arc levels sorted from lowest to highest.
"""
levels = set(map(lambda arc: arc["end"] - arc["start"], arcs))
return sorted(list(levels))
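# A minimal usage sketch for DependencyRenderer (illustrative only; the input
# dict mirrors what displacy's parsing step produces, and the option names
# follow the defaults read in __init__ above):
#
#     renderer = DependencyRenderer(options={"compact": True})
#     parsed = [{"words": [{"text": "hello", "tag": "INTJ"},
#                          {"text": "world", "tag": "NOUN"}],
#                "arcs": [{"start": 0, "end": 1, "label": "intj", "dir": "left"}],
#                "settings": {"lang": "en", "direction": "ltr"}}]
#     svg_markup = renderer.render(parsed, page=False, minify=False)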
class EntityRenderer:
"""Render named entities as HTML."""
style = "ent"
def __init__(self, options: Dict[str, Any] = {}) -> None:
"""Initialise dependency renderer.
options (dict): Visualiser-specific options (colors, ents)
"""
colors = dict(DEFAULT_LABEL_COLORS)
user_colors = registry.displacy_colors.get_all()
for user_color in user_colors.values():
if callable(user_color):
# Since this comes from the function registry, we want to make
# sure we support functions that *return* a dict of colors
user_color = user_color()
if not isinstance(user_color, dict):
raise ValueError(Errors.E925.format(obj=type(user_color)))
colors.update(user_color)
colors.update(options.get("colors", {}))
self.default_color = DEFAULT_ENTITY_COLOR
self.colors = {label.upper(): color for label, color in colors.items()}
self.ents = options.get("ents", None)
if self.ents is not None:
self.ents = [ent.upper() for ent in self.ents]
self.direction = DEFAULT_DIR
self.lang = DEFAULT_LANG
template = options.get("template")
if template:
self.ent_template = template
else:
if self.direction == "rtl":
self.ent_template = TPL_ENT_RTL
else:
self.ent_template = TPL_ENT
def render(
self, parsed: List[Dict[str, Any]], page: bool = False, minify: bool = False
) -> str:
"""Render complete markup.
parsed (list): Dependency parses to render.
page (bool): Render parses wrapped as full HTML page.
minify (bool): Minify HTML markup.
RETURNS (str): Rendered HTML markup.
"""
rendered = []
for i, p in enumerate(parsed):
if i == 0:
settings = p.get("settings", {})
self.direction = settings.get("direction", DEFAULT_DIR)
self.lang = settings.get("lang", DEFAULT_LANG)
rendered.append(self.render_ents(p["text"], p["ents"], p.get("title")))
if page:
docs = "".join([TPL_FIGURE.format(content=doc) for doc in rendered])
markup = TPL_PAGE.format(content=docs, lang=self.lang, dir=self.direction)
else:
markup = "".join(rendered)
if minify:
return minify_html(markup)
return markup
def render_ents(
self, text: str, spans: List[Dict[str, Any]], title: Optional[str]
) -> str:
"""Render entities in text.
text (str): Original text.
spans (list): Individual entity spans and their start, end and label.
title (str / None): Document title set in Doc.user_data['title'].
"""
markup = ""
offset = 0
for span in spans:
label = span["label"]
start = span["start"]
end = span["end"]
additional_params = span.get("params", {})
entity = escape_html(text[start:end])
fragments = text[offset:start].split("\n")
for i, fragment in enumerate(fragments):
markup += escape_html(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
if self.ents is None or label.upper() in self.ents:
color = self.colors.get(label.upper(), self.default_color)
ent_settings = {"label": label, "text": entity, "bg": color}
ent_settings.update(additional_params)
markup += self.ent_template.format(**ent_settings)
else:
markup += entity
offset = end
fragments = text[offset:].split("\n")
for i, fragment in enumerate(fragments):
markup += escape_html(fragment)
if len(fragments) > 1 and i != len(fragments) - 1:
markup += "</br>"
markup = TPL_ENTS.format(content=markup, dir=self.direction)
if title:
markup = TPL_TITLE.format(title=title) + markup
return markup
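# A minimal usage sketch for EntityRenderer (illustrative only; entity spans
# are character offsets into the original text, as consumed by render_ents):
#
#     renderer = EntityRenderer()
#     html = renderer.render([{"text": "Apple is a company",
#                              "ents": [{"start": 0, "end": 5, "label": "ORG"}],
#                              "title": None}], page=False, minify=False)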
| mit |
daonb/obudget | src/server/budget_lines/management/commands/budget_lines_csv_to_db.py | 1 | 1996 | import sys
import csv
import re
from obudget.budget_lines.models import BudgetLine
from django.core.management.base import BaseCommand
class Command(BaseCommand):
args = '<csv-file>'
    help = "Parses csv'd budget data into the DB"
def handle(self, *args, **options):
reader = csv.DictReader(file(args[0]), ['year','title','budget_id','allocated','revised','used'])
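        # Each input row is expected to follow the field order given above,
        # e.g. (illustrative): 2011,Education budget,002043,1500,1480,1290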
print 'Deleting current rows'
BudgetLine.objects.all().delete()
print 'Loading raw rows'
x = set()
k = 0
for d in reader:
key = d['budget_id'], d['year']
if key in x:
continue
x.add(key)
BudgetLine( title = d['title'].decode('utf8'),
budget_id = d['budget_id'],
amount_allocated = int(d['allocated']),
amount_revised = int(d['revised']),
amount_used = int(d['used']),
year = int(d['year']),
budget_id_len = len(d['budget_id']),
).save()
k+=1
if k % 1000 == 0:
print k
# Update internal relationships in the DB
print 'Internal relationships'
k = 0
for line in BudgetLine.objects.all():
k+=1
if k % 1000 == 0:
print k
if line.budget_id == None or len(line.budget_id) == 2:
continue
for i in range(2,len(line.budget_id),2):
parents = BudgetLine.objects.filter( year = line.year, budget_id = line.budget_id[:-i] ).count()
if parents > 0:
parent = BudgetLine.objects.get( year = line.year, budget_id = line.budget_id[:-i] )
line.containing_line = parent
line.save()
break
| bsd-3-clause |
CloudServer/nova | nova/api/openstack/compute/contrib/baremetal_nodes.py | 60 | 6552 | # Copyright (c) 2013 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The bare-metal admin extension with Ironic Proxy."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.i18n import _
ironic_client = importutils.try_import('ironicclient.client')
ironic_exc = importutils.try_import('ironicclient.exc')
authorize = extensions.extension_authorizer('compute', 'baremetal_nodes')
node_fields = ['id', 'cpus', 'local_gb', 'memory_mb', 'pm_address',
'pm_user', 'service_host', 'terminal_port', 'instance_uuid']
node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']
interface_fields = ['id', 'address', 'datapath_id', 'port_no']
CONF = cfg.CONF
CONF.import_opt('api_version',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('api_endpoint',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_username',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_password',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('admin_tenant_name',
'nova.virt.ironic.driver',
group='ironic')
CONF.import_opt('compute_driver', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
def _check_ironic_client_enabled():
"""Check whether Ironic is installed or not."""
if ironic_client is None:
msg = _("Ironic client unavailable, cannot access Ironic.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
def _get_ironic_client():
"""return an Ironic client."""
# TODO(NobodyCam): Fix insecure setting
kwargs = {'os_username': CONF.ironic.admin_username,
'os_password': CONF.ironic.admin_password,
'os_auth_url': CONF.ironic.admin_url,
'os_tenant_name': CONF.ironic.admin_tenant_name,
'os_service_type': 'baremetal',
'os_endpoint_type': 'public',
'insecure': 'true',
'ironic_url': CONF.ironic.api_endpoint}
ironicclient = ironic_client.get_client(CONF.ironic.api_version, **kwargs)
return ironicclient
def _no_ironic_proxy(cmd):
raise webob.exc.HTTPBadRequest(
explanation=_("Command Not supported. Please use Ironic "
"command %(cmd)s to perform this "
"action.") % {'cmd': cmd})
class BareMetalNodeController(wsgi.Controller):
"""The Bare-Metal Node API controller for the OpenStack API.
Ironic is used for the following commands:
'baremetal-node-list'
'baremetal-node-show'
"""
def __init__(self, ext_mgr=None, *args, **kwargs):
super(BareMetalNodeController, self).__init__(*args, **kwargs)
self.ext_mgr = ext_mgr
def _node_dict(self, node_ref):
d = {}
for f in node_fields:
d[f] = node_ref.get(f)
if self.ext_mgr.is_loaded('os-baremetal-ext-status'):
for f in node_ext_fields:
d[f] = node_ref.get(f)
return d
def index(self, req):
context = req.environ['nova.context']
authorize(context)
nodes = []
# proxy command to Ironic
_check_ironic_client_enabled()
ironicclient = _get_ironic_client()
ironic_nodes = ironicclient.node.list(detail=True)
for inode in ironic_nodes:
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0)}
nodes.append(node)
return {'nodes': nodes}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
# proxy command to Ironic
_check_ironic_client_enabled()
icli = _get_ironic_client()
try:
inode = icli.node.get(id)
except ironic_exc.NotFound:
msg = _("Node %s could not be found.") % id
raise webob.exc.HTTPNotFound(explanation=msg)
iports = icli.node.list_ports(id)
node = {'id': inode.uuid,
'interfaces': [],
'host': 'IRONIC MANAGED',
'task_state': inode.provision_state,
'cpus': inode.properties.get('cpus', 0),
'memory_mb': inode.properties.get('memory_mb', 0),
'disk_gb': inode.properties.get('local_gb', 0),
'instance_uuid': inode.instance_uuid}
for port in iports:
node['interfaces'].append({'address': port.address})
return {'node': node}
def create(self, req, body):
_no_ironic_proxy("port-create")
def delete(self, req, id):
_no_ironic_proxy("port-create")
@wsgi.action('add_interface')
def _add_interface(self, req, id, body):
_no_ironic_proxy("port-create")
@wsgi.action('remove_interface')
def _remove_interface(self, req, id, body):
_no_ironic_proxy("port-delete")
class Baremetal_nodes(extensions.ExtensionDescriptor):
"""Admin-only bare-metal node administration."""
name = "BareMetalNodes"
alias = "os-baremetal-nodes"
namespace = "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2"
updated = "2013-01-04T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-baremetal-nodes',
BareMetalNodeController(self.ext_mgr),
member_actions={"action": "POST", })
resources.append(res)
return resources
| apache-2.0 |
HewlettPackard/oneview-ansible | build-doc/module_docs_fragments/oneview.py | 1 | 2367 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
class ModuleDocFragment(object):
# OneView doc fragment
DOCUMENTATION = '''
options:
config:
description:
- Path to a .json configuration file containing the OneView client configuration.
The configuration file is optional. If the file path is not provided, the configuration will be loaded from
environment variables.
required: false
notes:
- "A sample configuration file for the config parameter can be found at:
U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)"
- "Check how to use environment variables for configuration at:
U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)"
- "Additional Playbooks for the HPE OneView Ansible modules can be found at:
U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)"
'''
VALIDATEETAG = '''
options:
validate_etag:
description:
- When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag
for the resource matches the ETag provided in the data.
default: true
choices: ['true', 'false']
'''
FACTSPARAMS = '''
options:
params:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
C(start): The first item to return, using 0-based indexing.
C(count): The number of resources to return.
C(filter): A general filter/query string to narrow the list of items returned.
C(sort): The sort order of the returned data set."
required: false
'''
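    # Illustrative example of the documented params as they might appear in a
    # playbook task for one of the *_facts modules (values are assumptions):
    #
    #     params:
    #       start: 0
    #       count: 5
    #       sort: 'name:ascending'
    #       filter: 'status=OK'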
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/unittest/main.py | 84 | 9759 | """Unittest main program"""
import sys
import argparse
import os
from . import loader, runner
from .signals import installHandler
__unittest = True
MAIN_EXAMPLES = """\
Examples:
%(prog)s test_module - run tests from test_module
%(prog)s module.TestClass - run tests from module.TestClass
%(prog)s module.Class.test_method - run specified test method
"""
MODULE_EXAMPLES = """\
Examples:
%(prog)s - run default set of tests
%(prog)s MyTestSuite - run suite 'MyTestSuite'
%(prog)s MyTestCase.testSomething - run MyTestCase.testSomething
%(prog)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
# on Linux / Mac OS X 'foo.PY' is not importable, but on
    # Windows it is. Simpler to do a case-insensitive match;
# a better check would be to check that the name is a
# valid Python module name.
if os.path.isfile(name) and name.lower().endswith('.py'):
if os.path.isabs(name):
rel_path = os.path.relpath(name, os.getcwd())
if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
return name
name = rel_path
# on Windows both '\' and '/' are used as path
# separators. Better to replace both than rely on os.path.sep
return name[:-3].replace('\\', '.').replace('/', '.')
return name
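# Illustrative conversions (assuming the .py path exists relative to the
# current working directory):
#     _convert_name('tests/test_spam.py') -> 'tests.test_spam'
#     _convert_name('pkg.module')         -> 'pkg.module'  (left unchanged)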
def _convert_names(names):
return [_convert_name(name) for name in names]
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
# defaults for testing
module=None
verbosity = 1
failfast = catchbreak = buffer = progName = warnings = None
_discovery_parser = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
if warnings is None and not sys.warnoptions:
            # even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print(msg)
if self._discovery_parser is None:
self._initArgParsers()
self._print_help()
sys.exit(2)
def _print_help(self, *args, **kwargs):
if self.module is None:
print(self._main_parser.format_help())
print(MAIN_EXAMPLES % {'prog': self.progName})
self._discovery_parser.print_help()
else:
print(self._main_parser.format_help())
print(MODULE_EXAMPLES % {'prog': self.progName})
def parseArgs(self, argv):
self._initArgParsers()
if self.module is None:
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
self._main_parser.parse_args(argv[1:], self)
if not self.tests:
# this allows "python -m unittest -v" to still work for
# test discovery.
self._do_discovery([])
return
else:
self._main_parser.parse_args(argv[1:], self)
if self.tests:
self.testNames = _convert_names(self.tests)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
elif self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif isinstance(self.defaultTest, str):
self.testNames = (self.defaultTest,)
else:
self.testNames = list(self.defaultTest)
self.createTests()
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _initArgParsers(self):
parent_parser = self._getParentArgParser()
self._main_parser = self._getMainArgParser(parent_parser)
self._discovery_parser = self._getDiscoveryArgParser(parent_parser)
def _getParentArgParser(self):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-v', '--verbose', dest='verbosity',
action='store_const', const=2,
help='Verbose output')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0,
help='Quiet output')
if self.failfast is None:
parser.add_argument('-f', '--failfast', dest='failfast',
action='store_true',
help='Stop on first fail or error')
self.failfast = False
if self.catchbreak is None:
parser.add_argument('-c', '--catch', dest='catchbreak',
action='store_true',
help='Catch ctrl-C and display results so far')
self.catchbreak = False
if self.buffer is None:
parser.add_argument('-b', '--buffer', dest='buffer',
action='store_true',
help='Buffer stdout and stderr during tests')
self.buffer = False
return parser
def _getMainArgParser(self, parent):
parser = argparse.ArgumentParser(parents=[parent])
parser.prog = self.progName
parser.print_help = self._print_help
parser.add_argument('tests', nargs='*',
help='a list of any number of test modules, '
'classes and test methods.')
return parser
def _getDiscoveryArgParser(self, parent):
parser = argparse.ArgumentParser(parents=[parent])
parser.prog = '%s discover' % self.progName
parser.epilog = ('For test discovery all test modules must be '
'importable from the top level directory of the '
'project.')
parser.add_argument('-s', '--start-directory', dest='start',
help="Directory to start discovery ('.' default)")
parser.add_argument('-p', '--pattern', dest='pattern',
help="Pattern to match tests ('test*.py' default)")
parser.add_argument('-t', '--top-level-directory', dest='top',
help='Top level directory of project (defaults to '
'start directory)')
for arg in ('start', 'pattern', 'top'):
parser.add_argument(arg, nargs='?',
default=argparse.SUPPRESS,
help=argparse.SUPPRESS)
return parser
def _do_discovery(self, argv, Loader=None):
self.start = '.'
self.pattern = 'test*.py'
self.top = None
if argv is not None:
# handle command line args for test discovery
if self._discovery_parser is None:
# for testing
self._initArgParsers()
self._discovery_parser.parse_args(argv, self)
loader = self.testLoader if Loader is None else Loader()
self.test = loader.discover(self.start, self.pattern, self.top)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, type):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer,
warnings=self.warnings)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
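# Typical command lines handled by TestProgram (mirroring the --help examples
# above; discovery options are parsed by _getDiscoveryArgParser):
#     python -m unittest test_module
#     python -m unittest module.TestClass.test_method
#     python -m unittest discover -s project_dir -p 'test_*.py'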
| gpl-2.0 |
farodin91/servo | components/script/dom/bindings/codegen/parser/tests/test_replaceable.py | 138 | 1833 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def should_throw(parser, harness, message, code):
parser = parser.reset();
threw = False
try:
parser.parse(code)
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown: %s" % message)
def WebIDLTest(parser, harness):
# The [Replaceable] extended attribute MUST take no arguments.
should_throw(parser, harness, "no arguments", """
interface I {
[Replaceable=X] readonly attribute long A;
};
""")
# An attribute with the [Replaceable] extended attribute MUST NOT also be
# declared with the [PutForwards] extended attribute.
should_throw(parser, harness, "PutForwards", """
interface I {
[PutForwards=B, Replaceable] readonly attribute J A;
};
interface J {
attribute long B;
};
""")
# The [Replaceable] extended attribute MUST NOT be used on an attribute
# that is not read only.
should_throw(parser, harness, "writable attribute", """
interface I {
[Replaceable] attribute long A;
};
""")
# The [Replaceable] extended attribute MUST NOT be used on a static
# attribute.
should_throw(parser, harness, "static attribute", """
interface I {
[Replaceable] static readonly attribute long A;
};
""")
# The [Replaceable] extended attribute MUST NOT be used on an attribute
# declared on a callback interface.
should_throw(parser, harness, "callback interface", """
callback interface I {
[Replaceable] readonly attribute long A;
};
""")
| mpl-2.0 |
noroot/zulip | api/integrations/codebase/zulip_codebase_config.py | 124 | 2537 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Change these values to configure authentication for your codebase account
# Note that this is the Codebase API Username, found in the Settings page
# for your account
CODEBASE_API_USERNAME = "[email protected]"
CODEBASE_API_KEY = "1234561234567abcdef"
# The URL of your codebase setup
CODEBASE_ROOT_URL = "https://YOUR_COMPANY.codebasehq.com"
# When initially started, how many hours of messages to include.
# Note that the Codebase API only returns the 20 latest events,
# if you have more than 20 events that fit within this window,
# earlier ones may be lost
CODEBASE_INITIAL_HISTORY_HOURS = 12
# Change these values to configure Zulip authentication for the plugin
ZULIP_USER = "[email protected]"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# The streams to send commit information and ticket information to
ZULIP_COMMITS_STREAM_NAME = "codebase"
ZULIP_TICKETS_STREAM_NAME = "tickets"
# If properly installed, the Zulip API should be in your import
# path, but if not, set a custom path below
ZULIP_API_PATH = None
# Set this to your Zulip API server URI
ZULIP_SITE = "https://api.zulip.com"
# If you wish to log to a file rather than stdout/stderr,
# please fill this out your desired path
LOG_FILE = None
# This file is used to resume this mirror in case the script shuts down.
# It is required and needs to be writeable.
RESUME_FILE = "/var/tmp/zulip_codebase.state"
| apache-2.0 |
evfredericksen/pynacea | pynhost/pynhost/platforms/winconstants.py | 1 | 3710 | import ctypes as ct
class POINT(ct.Structure):
_fields_ = [("x", ct.c_ulong), ("y", ct.c_ulong)]
PUL = ct.POINTER(ct.c_ulong)
GMEM_DDESHARE = 0x2000
class KEYBOARD_INPUT(ct.Structure):
_fields_ = [("wVk", ct.c_ushort),
("wScan", ct.c_ushort),
("dwFlags", ct.c_ulong),
("time", ct.c_ulong),
("dwExtraInfo", PUL)]
class HARDWARE_INPUT(ct.Structure):
_fields_ = [("uMsg", ct.c_ulong),
("wParamL", ct.c_short),
("wParamH", ct.c_ushort)]
class MOUSE_INPUT(ct.Structure):
_fields_ = [("dx", ct.c_long),
("dy", ct.c_long),
("mouseData", ct.c_ulong),
("dwFlags", ct.c_ulong),
("time",ct.c_ulong),
("dwExtraInfo", PUL)]
class INPUT_I(ct.Union):
_fields_ = [("ki", KEYBOARD_INPUT),
("mi", MOUSE_INPUT),
("hi", HARDWARE_INPUT)]
class INPUT(ct.Structure):
_fields_ = [("type", ct.c_ulong),
("ii", INPUT_I)]
WINDOWS_KEYCODES = {
'lmouse': 0x01,
'rmouse': 0x02,
'cancel': 0x03,
'mmouse': 0x04,
'x1mouse': 0x05,
'x2mouse': 0x06,
'back': 0x08,
'backspace': 0x08,
'tab': 0x09,
'clear': 0x0C,
'enter': 0x0D,
'return': 0x0D,
'\n': 0x0D,
'\r\n': 0x0D,
'shift': 0x10,
'ctrl': 0x11,
'control': 0x11,
'alt': 0x12,
'caps': 0x14,
'esc': 0x1B,
'escape': 0x1B,
' ': 0x20,
'pageup': 0x21,
'page_up': 0x21,
'pagedown': 0x22,
'page_down': 0x22,
'end': 0x23,
'home': 0x24,
'left': 0x25,
'up': 0x26,
'right': 0x27,
'down': 0x28,
'select': 0x29,
'print': 0x2A,
'execute': 0x2B,
'print_screen': 0x2C,
'insert': 0x2D,
'del': 0x2E,
'delete': 0x2E,
'help': 0X2F,
'0': 0x30,
'1': 0x31,
'2': 0x32,
'3': 0x33,
'4': 0x34,
'5': 0x35,
'6': 0x36,
'7': 0x37,
'8': 0x38,
'9': 0x39,
'a': 0x41,
'b': 0x42,
'c': 0x43,
'd': 0x44,
'e': 0x45,
'f': 0x46,
'g': 0x47,
'h': 0x48,
'i': 0x49,
'j': 0x4A,
'k': 0x4B,
'l': 0x4C,
'm': 0x4D,
'n': 0x4E,
'o': 0x4F,
'p': 0x50,
'q': 0x51,
'r': 0x52,
's': 0x53,
't': 0x54,
'u': 0x55,
'v': 0x56,
'w': 0x57,
'x': 0x58,
'y': 0x59,
'z': 0x5A,
'lwindows': 0x5B,
'rwindows': 0x5C,
'apps': 0x5D,
'sleep': 0x5F,
'numpad0': 0x60,
'numpad1': 0x61,
'numpad2': 0x62,
'numpad3': 0x63,
'numpad4': 0x64,
'numpad5': 0x65,
'numpad6': 0x66,
'numpad7': 0x67,
'numpad8': 0x68,
'numpad9': 0x69,
'f1': 0x70,
'f2': 0x71,
'f3': 0x72,
'f4': 0x73,
'f5': 0x74,
'f6': 0x75,
'f7': 0x76,
'f8': 0x77,
'f9': 0x78,
'f10': 0x79,
'f11': 0x7A,
'f12': 0x7B,
'f13': 0x7C,
'f14': 0x7D,
'f15': 0x7E,
'f16': 0x7F,
'f17': 0x80,
'f18': 0x81,
'f19': 0x82,
'f20': 0x83,
'f21': 0x84,
'f22': 0x85,
'f23': 0x86,
'f24': 0x87,
'numlock': 0x90,
'scroll': 0x91,
'lshift': 0xA0,
'rshift': 0xA1,
'mute': 0xAD,
'volume_up': 0xAE,
'volume_down': 0xAF,
'.': 0xBE,
',': 0xBC,
';': 0xBA,
"'": 0xDE,
'/': 0xBF,
'`': 0xC0,
'-': 0xBD,
'=': 0xBB,
'[': 0xDB,
'\\': 0xDC,
']': 0xDD,
}
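# Maps characters that require the Shift key to the unshifted key that produces
# them, so a caller can synthesize them as Shift + <base key> presses.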
WINDOWS_SHIFT_MAP = {
')': '0',
'!': '1',
'@': '2',
'#': '3',
'$': '4',
'%': '5',
'^': '6',
'&': '7',
'*': '8',
'(': '9',
'<': ',',
'>': '.',
'?': '/',
'"': "'",
':': ';',
'{': '[',
'}': ']',
'|': '\\',
'~': '`',
'_': '-',
'+': '=',
}
| mit |
navycrow/Sick-Beard | lib/hachoir_parser/game/blp.py | 90 | 11108 | """
Blizzard BLP Image File Parser
Author: Robert Xiao
Creation date: July 10 2007
- BLP1 File Format
http://magos.thejefffiles.com/War3ModelEditor/MagosBlpFormat.txt
- BLP2 File Format (Wikipedia)
http://en.wikipedia.org/wiki/.BLP
- S3TC (DXT1, 3, 5) Formats
http://en.wikipedia.org/wiki/S3_Texture_Compression
"""
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.field import String, UInt32, UInt8, Enum, FieldSet, RawBytes, GenericVector, Bit, Bits
from lib.hachoir_parser.parser import Parser
from lib.hachoir_parser.image.common import PaletteRGBA
from lib.hachoir_core.tools import alignValue
class PaletteIndex(UInt8):
def createDescription(self):
return "Palette index %i (%s)" % (self.value, self["/palette/color[%i]" % self.value].description)
class Generic2DArray(FieldSet):
def __init__(self, parent, name, width, height, item_class, row_name="row", item_name="item", *args, **kwargs):
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.width = width
self.height = height
self.item_class = item_class
self.row_name = row_name
self.item_name = item_name
def createFields(self):
for i in xrange(self.height):
yield GenericVector(self, self.row_name+"[]", self.width, self.item_class, self.item_name)
class BLP1File(Parser):
MAGIC = "BLP1"
PARSER_TAGS = {
"id": "blp1",
"category": "game",
"file_ext": ("blp",),
"mime": (u"application/x-blp",), # TODO: real mime type???
"magic": ((MAGIC, 0),),
"min_size": 7*32, # 7 DWORDs start, incl. magic
"description": "Blizzard Image Format, version 1",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "BLP1":
return "Invalid magic"
return True
def createFields(self):
yield String(self, "magic", 4, "Signature (BLP1)")
yield Enum(UInt32(self, "compression"), {
0:"JPEG Compression",
1:"Uncompressed"})
yield UInt32(self, "flags")
yield UInt32(self, "width")
yield UInt32(self, "height")
yield Enum(UInt32(self, "type"), {
3:"Uncompressed Index List + Alpha List",
4:"Uncompressed Index List + Alpha List",
5:"Uncompressed Index List"})
yield UInt32(self, "subtype")
for i in xrange(16):
yield UInt32(self, "mipmap_offset[]")
for i in xrange(16):
yield UInt32(self, "mipmap_size[]")
compression = self["compression"].value
image_type = self["type"].value
width = self["width"].value
height = self["height"].value
if compression == 0: # JPEG Compression
yield UInt32(self, "jpeg_header_len")
yield RawBytes(self, "jpeg_header", self["jpeg_header_len"].value, "Shared JPEG Header")
else:
yield PaletteRGBA(self, "palette", 256)
offsets = self.array("mipmap_offset")
sizes = self.array("mipmap_size")
for i in xrange(16):
if not offsets[i].value or not sizes[i].value:
continue
padding = self.seekByte(offsets[i].value)
if padding:
yield padding
if compression == 0:
yield RawBytes(self, "mipmap[%i]" % i, sizes[i].value, "JPEG data, append to header to recover complete image")
elif compression == 1:
yield Generic2DArray(self, "mipmap_indexes[%i]" % i, width, height, PaletteIndex, "row", "index", "Indexes into the palette")
if image_type in (3, 4):
yield Generic2DArray(self, "mipmap_alphas[%i]" % i, width, height, UInt8, "row", "alpha", "Alpha values")
width /= 2
height /= 2
def interp_avg(data_low, data_high, n):
"""Interpolated averages. For example,
>>> list(interp_avg(1, 10, 3))
[4, 7]
"""
if isinstance(data_low, (int, long)):
for i in range(1, n):
yield (data_low * (n-i) + data_high * i) / n
else: # iterable
pairs = zip(data_low, data_high)
pair_iters = [interp_avg(x, y, n) for x, y in pairs]
for i in range(1, n):
yield [iter.next() for iter in pair_iters]
def color_name(data, bits):
"""Color names in #RRGGBB format, given the number of bits for each component."""
ret = ["#"]
for i in range(3):
ret.append("%02X" % (data[i] << (8-bits[i])))
return ''.join(ret)
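# For example (values chosen for illustration), the brightest RGB565 color
# maps to: color_name([31, 63, 31], [5, 6, 5]) -> '#F8FCF8'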
class DXT1(FieldSet):
static_size = 64
def __init__(self, parent, name, dxt2_mode=False, *args, **kwargs):
"""with dxt2_mode on, this field will always use the four color model"""
FieldSet.__init__(self, parent, name, *args, **kwargs)
self.dxt2_mode = dxt2_mode
def createFields(self):
values = [[], []]
for i in (0, 1):
yield Bits(self, "blue[]", 5)
yield Bits(self, "green[]", 6)
yield Bits(self, "red[]", 5)
values[i] = [self["red[%i]" % i].value,
self["green[%i]" % i].value,
self["blue[%i]" % i].value]
if values[0] > values[1] or self.dxt2_mode:
values += interp_avg(values[0], values[1], 3)
else:
values += interp_avg(values[0], values[1], 2)
values.append(None) # transparent
for i in xrange(16):
pixel = Bits(self, "pixel[%i][%i]" % divmod(i, 4), 2)
color = values[pixel.value]
if color is None:
pixel._description = "Transparent"
else:
pixel._description = "RGB color: %s" % color_name(color, [5, 6, 5])
yield pixel
class DXT3Alpha(FieldSet):
static_size = 64
def createFields(self):
for i in xrange(16):
yield Bits(self, "alpha[%i][%i]" % divmod(i, 4), 4)
class DXT3(FieldSet):
static_size = 128
def createFields(self):
yield DXT3Alpha(self, "alpha", "Alpha Channel Data")
yield DXT1(self, "color", True, "Color Channel Data")
class DXT5Alpha(FieldSet):
static_size = 64
def createFields(self):
values = []
yield UInt8(self, "alpha_val[0]", "First alpha value")
values.append(self["alpha_val[0]"].value)
yield UInt8(self, "alpha_val[1]", "Second alpha value")
values.append(self["alpha_val[1]"].value)
if values[0] > values[1]:
values += interp_avg(values[0], values[1], 7)
else:
values += interp_avg(values[0], values[1], 5)
values += [0, 255]
for i in xrange(16):
pixel = Bits(self, "alpha[%i][%i]" % divmod(i, 4), 3)
alpha = values[pixel.value]
pixel._description = "Alpha value: %i" % alpha
yield pixel
class DXT5(FieldSet):
static_size = 128
def createFields(self):
yield DXT5Alpha(self, "alpha", "Alpha Channel Data")
yield DXT1(self, "color", True, "Color Channel Data")
class BLP2File(Parser):
MAGIC = "BLP2"
PARSER_TAGS = {
"id": "blp2",
"category": "game",
"file_ext": ("blp",),
"mime": (u"application/x-blp",),
"magic": ((MAGIC, 0),),
"min_size": 5*32, # 5 DWORDs start, incl. magic
"description": "Blizzard Image Format, version 2",
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "BLP2":
return "Invalid magic"
return True
def createFields(self):
yield String(self, "magic", 4, "Signature (BLP2)")
yield Enum(UInt32(self, "compression", "Compression type"), {
0:"JPEG Compressed",
1:"Uncompressed or DXT/S3TC compressed"})
yield Enum(UInt8(self, "encoding", "Encoding type"), {
1:"Raw",
2:"DXT/S3TC Texture Compression (a.k.a. DirectX)"})
yield UInt8(self, "alpha_depth", "Alpha channel depth, in bits (0 = no alpha)")
yield Enum(UInt8(self, "alpha_encoding", "Encoding used for alpha channel"), {
0:"DXT1 alpha (0 or 1 bit alpha)",
1:"DXT3 alpha (4 bit alpha)",
7:"DXT5 alpha (8 bit interpolated alpha)"})
yield Enum(UInt8(self, "has_mips", "Are mip levels present?"), {
0:"No mip levels",
1:"Mip levels present; number of levels determined by image size"})
yield UInt32(self, "width", "Base image width")
yield UInt32(self, "height", "Base image height")
for i in xrange(16):
yield UInt32(self, "mipmap_offset[]")
for i in xrange(16):
yield UInt32(self, "mipmap_size[]")
yield PaletteRGBA(self, "palette", 256)
compression = self["compression"].value
encoding = self["encoding"].value
alpha_depth = self["alpha_depth"].value
alpha_encoding = self["alpha_encoding"].value
width = self["width"].value
height = self["height"].value
if compression == 0: # JPEG Compression
yield UInt32(self, "jpeg_header_len")
yield RawBytes(self, "jpeg_header", self["jpeg_header_len"].value, "Shared JPEG Header")
offsets = self.array("mipmap_offset")
sizes = self.array("mipmap_size")
for i in xrange(16):
if not offsets[i].value or not sizes[i].value:
continue
padding = self.seekByte(offsets[i].value)
if padding:
yield padding
if compression == 0:
yield RawBytes(self, "mipmap[%i]" % i, sizes[i].value, "JPEG data, append to header to recover complete image")
elif compression == 1 and encoding == 1:
yield Generic2DArray(self, "mipmap_indexes[%i]" % i, height, width, PaletteIndex, "row", "index", "Indexes into the palette")
if alpha_depth == 1:
yield GenericVector(self, "mipmap_alphas[%i]" % i, height, width, Bit, "row", "is_opaque", "Alpha values")
elif alpha_depth == 8:
yield GenericVector(self, "mipmap_alphas[%i]" % i, height, width, UInt8, "row", "alpha", "Alpha values")
elif compression == 1 and encoding == 2:
block_height = alignValue(height, 4) // 4
block_width = alignValue(width, 4) // 4
if alpha_depth in [0, 1] and alpha_encoding == 0:
yield Generic2DArray(self, "mipmap[%i]" % i, block_height, block_width, DXT1, "row", "block", "DXT1-compressed image blocks")
elif alpha_depth == 8 and alpha_encoding == 1:
yield Generic2DArray(self, "mipmap[%i]" % i, block_height, block_width, DXT3, "row", "block", "DXT3-compressed image blocks")
elif alpha_depth == 8 and alpha_encoding == 7:
yield Generic2DArray(self, "mipmap[%i]" % i, block_height, block_width, DXT5, "row", "block", "DXT5-compressed image blocks")
width /= 2
height /= 2
| gpl-3.0 |
zhoulingjun/django | django/views/decorators/csrf.py | 586 | 2202 | from functools import wraps
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.utils.decorators import available_attrs, decorator_from_middleware
csrf_protect = decorator_from_middleware(CsrfViewMiddleware)
csrf_protect.__name__ = "csrf_protect"
csrf_protect.__doc__ = """
This decorator adds CSRF protection in exactly the same way as
CsrfViewMiddleware, but it can be used on a per view basis. Using both, or
using the decorator multiple times, is harmless and efficient.
"""
class _EnsureCsrfToken(CsrfViewMiddleware):
# We need this to behave just like the CsrfViewMiddleware, but not reject
# requests or log warnings.
def _reject(self, request, reason):
return None
requires_csrf_token = decorator_from_middleware(_EnsureCsrfToken)
requires_csrf_token.__name__ = 'requires_csrf_token'
requires_csrf_token.__doc__ = """
Use this decorator on views that need a correct csrf_token available to
RequestContext, but without the CSRF protection that csrf_protect
enforces.
"""
class _EnsureCsrfCookie(CsrfViewMiddleware):
def _reject(self, request, reason):
return None
def process_view(self, request, callback, callback_args, callback_kwargs):
retval = super(_EnsureCsrfCookie, self).process_view(request, callback, callback_args, callback_kwargs)
# Forces process_response to send the cookie
get_token(request)
return retval
ensure_csrf_cookie = decorator_from_middleware(_EnsureCsrfCookie)
ensure_csrf_cookie.__name__ = 'ensure_csrf_cookie'
ensure_csrf_cookie.__doc__ = """
Use this decorator to ensure that a view sets a CSRF cookie, whether or not it
uses the csrf_token template tag, or the CsrfViewMiddleware is used.
"""
def csrf_exempt(view_func):
"""
Marks a view function as being exempt from the CSRF view protection.
"""
# We could just do view_func.csrf_exempt = True, but decorators
# are nicer if they don't have side-effects, so we return a new
# function.
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.csrf_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
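# A minimal usage sketch (illustrative):
#
#     from django.http import HttpResponse
#     from django.views.decorators.csrf import csrf_exempt
#
#     @csrf_exempt
#     def payment_webhook(request):
#         # CSRF checks from CsrfViewMiddleware are skipped for this view.
#         return HttpResponse('ok')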
| bsd-3-clause |
vityurkiv/Ox | python/MooseDocs/database/items.py | 2 | 3034 | """
The following objects are designed to work with the Database class,
see Database.py for usage.
"""
import os
import re
import subprocess
import MooseDocs
from markdown.util import etree
import logging
log = logging.getLogger(__name__)
class DatabaseItem(object):
"""
Base class for database items.
Args:
filename[str]: The complete filename (supplied by Database object).
"""
output = os.path.dirname(subprocess.check_output(['git', 'rev-parse', '--git-dir'], stderr=subprocess.STDOUT))
def __init__(self, filename, **kwargs):
self._filename = os.path.abspath(filename)
self._rel_path = os.path.relpath(filename, self.output)
self._config = kwargs
def keys(self):
pass
def markdown(self):
pass
def html(self):
pass
def filename(self):
return self._filename
def content(self):
fid = open(self._filename, 'r')
content = fid.read()
fid.close()
return content
class MarkdownIncludeItem(DatabaseItem):
"""
An item that returns a markdown include string for use with the markdown_include extension.
"""
def keys(self):
yield self._filename
def markdown(self):
return '{{!{}!}}'.format(self._filename)
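        # e.g. for /path/to/foo.md this produces "{!/path/to/foo.md!}", the
        # include directive consumed by the markdown-include extension.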
class RegexItem(DatabaseItem):
"""
An item that creates keys base on regex match.
"""
def __init__(self, filename, regex, **kwargs):
DatabaseItem.__init__(self, filename, **kwargs)
self._regex = re.compile(regex)
self._repo = os.path.join(kwargs.get('repo'), self._rel_path)
def keys(self):
"""
Return the keys for which this item will be stored in the database.
"""
keys = []
for match in re.finditer(self._regex, self.content()):
k = match.group('key')
if k not in keys:
keys.append(k)
return keys
class InputFileItem(RegexItem):
"""
Returns a list item for input file matching of (type = ).
"""
def __init__(self, filename, **kwargs):
RegexItem.__init__(self, filename, r'type\s*=\s*(?P<key>\w+)\b', **kwargs)
def markdown(self):
return '* [{}]({})'.format(self._rel_path, self._repo)
def html(self):
el = etree.Element('li')
a = etree.SubElement(el, 'a')
a.set('href', self._repo)
a.text = self._rel_path
return el
class ChildClassItem(RegexItem):
"""
Returns a list item for h file containing a base.
"""
def __init__(self, filename, **kwargs):
super(ChildClassItem, self).__init__(filename, r'public\s*(?P<key>\w+)\b', **kwargs)
def html(self, element='li'):
c_filename = self._filename.replace('/include/', '/src/').replace('.h', '.C')
el = etree.Element(element)
a = etree.SubElement(el, 'a')
a.set('href', self._repo)
a.text = self._rel_path
if os.path.exists(c_filename):
etree.SubElement(el, 'br')
c_rel_path = self._rel_path.replace('/include/', '/src/').replace('.h', '.C')
c_repo = self._repo.replace('/include/', '/src/').replace('.h', '.C')
a = etree.SubElement(el, 'a')
a.set('href', c_repo)
a.text = c_rel_path
return el
| lgpl-2.1 |
mihailignatenko/erp | openerp/addons/base/tests/test_ir_sequence.py | 39 | 9375 | # -*- coding: utf-8 -*-
# Run with one of these commands:
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=. python tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy nosetests tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=../:. unit2 test_ir_sequence
# This assume an existing database.
import psycopg2
import psycopg2.errorcodes
import unittest2
import openerp
from openerp.tests import common
DB = common.DB
ADMIN_USER_ID = common.ADMIN_USER_ID
def registry(model):
return openerp.modules.registry.RegistryManager.get(DB)[model]
def cursor():
return openerp.modules.registry.RegistryManager.get(DB).cursor()
def drop_sequence(code):
cr = cursor()
for model in ['ir.sequence', 'ir.sequence.type']:
s = registry(model)
ids = s.search(cr, ADMIN_USER_ID, [('code', '=', code)])
s.unlink(cr, ADMIN_USER_ID, ids)
cr.commit()
cr.close()
class test_ir_sequence_standard(unittest2.TestCase):
""" A few tests for a 'Standard' (i.e. PostgreSQL) sequence. """
def test_ir_sequence_create(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
def test_ir_sequence_search(self):
""" Try a search. """
cr = cursor()
ids = registry('ir.sequence').search(cr, ADMIN_USER_ID, [], {})
assert ids
cr.commit()
cr.close()
def test_ir_sequence_draw(self):
""" Try to draw a number. """
cr = cursor()
n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type', {})
assert n
cr.commit()
cr.close()
def test_ir_sequence_draw_twice(self):
""" Try to draw a number from two transactions. """
cr0 = cursor()
cr1 = cursor()
n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type', {})
assert n0
n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type', {})
assert n1
cr0.commit()
cr1.commit()
cr0.close()
cr1.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type')
class test_ir_sequence_no_gap(unittest2.TestCase):
""" Copy of the previous tests for a 'No gap' sequence. """
def test_ir_sequence_create_no_gap(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_2', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_2', name='Test sequence',
implementation='no_gap')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
def test_ir_sequence_draw_no_gap(self):
""" Try to draw a number. """
cr = cursor()
n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_2', {})
assert n
cr.commit()
cr.close()
def test_ir_sequence_draw_twice_no_gap(self):
""" Try to draw a number from two transactions.
This is expected to not work.
"""
cr0 = cursor()
cr1 = cursor()
cr1._default_log_exceptions = False # Prevent logging a traceback
with self.assertRaises(psycopg2.OperationalError) as e:
n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type_2', {})
assert n0
n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type_2', {})
self.assertEqual(e.exception.pgcode, psycopg2.errorcodes.LOCK_NOT_AVAILABLE, msg="postgresql returned an incorrect errcode")
cr0.close()
cr1.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type_2')
class test_ir_sequence_change_implementation(unittest2.TestCase):
""" Create sequence objects and change their ``implementation`` field. """
def test_ir_sequence_1_create(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_3', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_3', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_4', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_4', name='Test sequence',
implementation='no_gap')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
def test_ir_sequence_2_write(self):
cr = cursor()
ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
[('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
{'implementation': 'standard'}, {})
registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
{'implementation': 'no_gap'}, {})
cr.commit()
cr.close()
def test_ir_sequence_3_unlink(self):
cr = cursor()
ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
[('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
registry('ir.sequence').unlink(cr, ADMIN_USER_ID, ids, {})
cr.commit()
cr.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type_3')
drop_sequence('test_sequence_type_4')
class test_ir_sequence_generate(unittest2.TestCase):
""" Create sequence objects and generate some values. """
def test_ir_sequence_create(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_5', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_5', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
cr = cursor()
f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_5', {})
assert all(str(x) == f() for x in xrange(1,10))
cr.commit()
cr.close()
def test_ir_sequence_create_no_gap(self):
""" Try to create a sequence object. """
cr = cursor()
d = dict(code='test_sequence_type_6', name='Test sequence type')
c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
assert c
d = dict(code='test_sequence_type_6', name='Test sequence')
c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
assert c
cr.commit()
cr.close()
cr = cursor()
f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_6', {})
assert all(str(x) == f() for x in xrange(1,10))
cr.commit()
cr.close()
@classmethod
def tearDownClass(cls):
drop_sequence('test_sequence_type_5')
drop_sequence('test_sequence_type_6')
class Test_ir_sequence_init(common.TransactionCase):
def test_00(self):
registry, cr, uid = self.registry, self.cr, self.uid
        # test that the read statement returns the right number_next value (from the PostgreSQL sequence, not the ir_sequence value)
sequence = registry('ir.sequence')
# first creation of sequence (normal)
values = {'number_next': 1,
'company_id': 1,
'padding': 4,
'number_increment': 1,
'implementation': 'standard',
'name': 'test-sequence-00'}
seq_id = sequence.create(cr, uid, values)
# Call get next 4 times
sequence.next_by_id(cr, uid, seq_id)
sequence.next_by_id(cr, uid, seq_id)
sequence.next_by_id(cr, uid, seq_id)
read_sequence = sequence.next_by_id(cr, uid, seq_id)
# Read the value of the current sequence
assert read_sequence == "0004", 'The actual sequence value must be 4. reading : %s' % read_sequence
# reset sequence to 1 by write method calling
sequence.write(cr, uid, [seq_id], {'number_next': 1})
# Read the value of the current sequence
read_sequence = sequence.next_by_id(cr, uid, seq_id)
assert read_sequence == "0001", 'The actual sequence value must be 1. reading : %s' % read_sequence
if __name__ == "__main__":
unittest2.main()
| agpl-3.0 |
PatrickOReilly/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
gorjuce/odoo | addons/website_sale_options/models/sale_order.py | 237 | 2831 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.tools.translate import _
class sale_order_line(osv.Model):
_inherit = "sale.order.line"
_columns = {
'linked_line_id': fields.many2one('sale.order.line', 'Linked Order Line', domain="[('order_id','!=',order_id)]", ondelete='cascade'),
'option_line_ids': fields.one2many('sale.order.line', 'linked_line_id', string='Options Linked'),
}
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
line_ids = super(sale_order, self)._cart_find_product_line(cr, uid, ids, product_id, line_id, context=context)
if line_id:
return line_ids
linked_line_id = kwargs.get('linked_line_id')
optional_product_ids = kwargs.get('optional_product_ids')
for so in self.browse(cr, uid, ids, context=context):
domain = [('id', 'in', line_ids)]
domain += linked_line_id and [('linked_line_id', '=', linked_line_id)] or [('linked_line_id', '=', False)]
if optional_product_ids:
domain += [('option_line_ids.product_id', '=', pid) for pid in optional_product_ids]
else:
domain += [('option_line_ids', '=', False)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
value = super(sale_order, self)._cart_update(cr, uid, ids, product_id, line_id, add_qty, set_qty, context=context, **kwargs)
linked_line_id = kwargs.get('linked_line_id')
sol = self.pool.get('sale.order.line')
line = sol.browse(cr, SUPERUSER_ID, value.get('line_id'), context=context)
for so in self.browse(cr, uid, ids, context=context):
if linked_line_id and linked_line_id in map(int,so.order_line):
linked = sol.browse(cr, SUPERUSER_ID, linked_line_id, context=context)
line.write({
"name": _("%s\nOption for: %s") % (line.name, linked.product_id.name_get()[0][1]),
"linked_line_id": linked_line_id
})
# select linked product
option_ids = [l for l in so.order_line if l.linked_line_id.id == line.id]
# update line
for l in option_ids:
super(sale_order, self)._cart_update(cr, uid, ids, l.product_id.id, l.id, add_qty, set_qty, context=context, **kwargs)
value['option_ids'] = [l.id for l in option_ids]
return value
| agpl-3.0 |
chrissimpkins/hsh | setup.py | 1 | 2256 | import os
import re
from setuptools import setup, find_packages
def docs_read(fname):
return open(os.path.join(os.path.dirname(__file__), 'docs', fname)).read()
def version_read():
settings_file = open(os.path.join(os.path.dirname(__file__), 'lib', 'hsh', 'settings.py')).read()
major_regex = """major_version\s*?=\s*?["']{1}(\d+)["']{1}"""
minor_regex = """minor_version\s*?=\s*?["']{1}(\d+)["']{1}"""
patch_regex = """patch_version\s*?=\s*?["']{1}(\d+)["']{1}"""
major_match = re.search(major_regex, settings_file)
minor_match = re.search(minor_regex, settings_file)
patch_match = re.search(patch_regex, settings_file)
major_version = major_match.group(1)
minor_version = minor_match.group(1)
patch_version = patch_match.group(1)
if len(major_version) == 0:
major_version = 0
if len(minor_version) == 0:
minor_version = 0
if len(patch_version) == 0:
patch_version = 0
return major_version + "." + minor_version + "." + patch_version
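# For illustration (assumed layout of lib/hsh/settings.py, not reproduced from
# this repository): version_read() expects assignments such as
#
#     major_version = "1"
#     minor_version = "0"
#     patch_version = "3"
#
# which the regexes above combine into the version string "1.0.3".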
setup(
name='hsh',
version=version_read(),
description='Simple file hash digests and file integrity checks',
long_description=(docs_read('README.rst')),
url='https://github.com/chrissimpkins/hsh',
license='MIT license',
author='Christopher Simpkins',
author_email='',
platforms=['any'],
entry_points = {
'console_scripts': [
'hsh = hsh.app:main'
],
},
packages=find_packages("lib"),
package_dir={'': 'lib'},
install_requires=['commandlines'],
keywords='file,hash,hash digest,checksum,file comparison,file integrity,file checksum,file check,SHA,MD5,SHA1,SHA224,SHA256,SHA384,SHA512',
include_package_data=True,
classifiers=[
'Intended Audience :: End Users/Desktop',
'Development Status :: 5 - Production/Stable',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows'
],
)
| mit |
vicky2135/lucious | oscar/lib/python2.7/site-packages/django/db/backends/postgresql/introspection.py | 51 | 10204 | from __future__ import unicode_literals
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
17: 'BinaryField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
ignored_tables = []
_get_indexes_query = """
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s"""
def get_field_type(self, data_type, description):
field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
if description.default and 'nextval' in description.default:
if field_type == 'IntegerField':
return 'AutoField'
elif field_type == 'BigIntegerField':
return 'BigAutoField'
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [
FieldInfo(*(
(force_text(line[0]),) +
line[1:6] +
(field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])
)) for line in cursor.description
]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
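    # For illustration (hypothetical schema, not part of Django): for a table
    # "book" whose column "author_id" references "author"("id"), the query
    # above yields a row ('author', 'author_id', 'id'), so get_relations()
    # returns {'author_id': ('id', 'author')}.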
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
key_columns.extend(cursor.fetchall())
return key_columns
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute(self._get_indexes_query, [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text
FROM information_schema.constraint_column_usage
WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
ORDER BY kc.ordinal_position ASC
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints
| bsd-3-clause |
yyoshinori/CastlePortal | public/assets/adminlte/bower_components/jvectormap/converter/converter.py | 129 | 10451 | #
# jVectorMap version 2.0.4
#
# Copyright 2011-2013, Kirill Lebedev
#
import sys
import shapely.geometry
import shapely.wkb
import shapely.affinity
from osgeo import ogr
from osgeo import osr
import json
import codecs
import copy
class Map:
def __init__(self, name, language):
self.paths = {}
self.name = name
self.language = language
self.width = 0
self.height = 0
self.bbox = []
def addPath(self, path, code, name):
self.paths[code] = {"path": path, "name": name}
def getJSCode(self):
map = {"paths": self.paths, "width": self.width, "height": self.height, "insets": self.insets, "projection": self.projection}
return "jQuery.fn.vectorMap('addMap', '"+self.name+"_"+self.projection['type']+"_"+self.language+"',"+json.dumps(map)+');'
class Converter:
def __init__(self, config):
args = {
'buffer_distance': -0.4,
'simplify_tolerance': 0.2,
'longitude0': 0,
'projection': 'mill',
'name': 'world',
'width': 900,
'language': 'en',
'precision': 2,
'insets': []
}
args.update(config)
self.map = Map(args['name'], args.get('language'))
if args.get('sources'):
self.sources = args['sources']
else:
self.sources = [{
'input_file': args.get('input_file'),
'where': args.get('where'),
'name_field': args.get('name_field'),
'code_field': args.get('code_field'),
'input_file_encoding': args.get('input_file_encoding')
}]
default_source = {
'where': '',
'name_field': 0,
'code_field': 1,
'input_file_encoding': 'iso-8859-1'
}
for index in range(len(self.sources)):
for key in default_source:
if self.sources[index].get(key) is None:
self.sources[index][key] = default_source[key]
self.features = {}
self.width = args.get('width')
self.minimal_area = args.get('minimal_area')
self.longitude0 = float(args.get('longitude0'))
self.projection = args.get('projection')
self.precision = args.get('precision')
self.buffer_distance = args.get('buffer_distance')
self.simplify_tolerance = args.get('simplify_tolerance')
self.for_each = args.get('for_each')
self.emulate_longitude0 = args.get('emulate_longitude0')
if args.get('emulate_longitude0') is None and (self.projection == 'merc' or self.projection =='mill') and self.longitude0 != 0:
self.emulate_longitude0 = True
if args.get('viewport'):
self.viewport = map(lambda s: float(s), args.get('viewport').split(' '))
else:
self.viewport = False
# spatial reference to convert to
self.spatialRef = osr.SpatialReference()
projString = '+proj='+str(self.projection)+' +a=6381372 +b=6381372 +lat_0=0'
if not self.emulate_longitude0:
projString += ' +lon_0='+str(self.longitude0)
self.spatialRef.ImportFromProj4(projString)
# handle map insets
if args.get('insets'):
self.insets = args.get('insets')
else:
self.insets = []
def loadData(self):
for sourceConfig in self.sources:
self.loadDataSource( sourceConfig )
def loadDataSource(self, sourceConfig):
source = ogr.Open( sourceConfig['input_file'] )
layer = source.GetLayer(0)
layer.SetAttributeFilter( sourceConfig['where'].encode('ascii') )
self.viewportRect = False
transformation = osr.CoordinateTransformation( layer.GetSpatialRef(), self.spatialRef )
if self.viewport:
layer.SetSpatialFilterRect( *self.viewport )
point1 = transformation.TransformPoint(self.viewport[0], self.viewport[1])
point2 = transformation.TransformPoint(self.viewport[2], self.viewport[3])
self.viewportRect = shapely.geometry.box(point1[0], point1[1], point2[0], point2[1])
layer.ResetReading()
codes = {}
if self.emulate_longitude0:
meridian = -180 + self.longitude0
p1 = transformation.TransformPoint(-180, 89)
p2 = transformation.TransformPoint(meridian, -89)
left = shapely.geometry.box(p1[0], p1[1], p2[0], p2[1])
p3 = transformation.TransformPoint(meridian, 89)
p4 = transformation.TransformPoint(180, -89)
right = shapely.geometry.box(p3[0], p3[1], p4[0], p4[1])
# load features
nextCode = 0
for feature in layer:
geometry = feature.GetGeometryRef()
geometryType = geometry.GetGeometryType()
if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
geometry.TransformTo( self.spatialRef )
shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
if not shapelyGeometry.is_valid:
shapelyGeometry = shapelyGeometry.buffer(0, 1)
if self.emulate_longitude0:
leftPart = shapely.affinity.translate(shapelyGeometry.intersection(left), p4[0] - p3[0])
rightPart = shapely.affinity.translate(shapelyGeometry.intersection(right), p1[0] - p2[0])
shapelyGeometry = leftPart.buffer(0.1, 1).union(rightPart.buffer(0.1, 1)).buffer(-0.1, 1)
if not shapelyGeometry.is_valid:
shapelyGeometry = shapelyGeometry.buffer(0, 1)
shapelyGeometry = self.applyFilters(shapelyGeometry)
if shapelyGeometry:
name = feature.GetFieldAsString(str(sourceConfig.get('name_field'))).decode(sourceConfig.get('input_file_encoding'))
code = feature.GetFieldAsString(str(sourceConfig.get('code_field'))).decode(sourceConfig.get('input_file_encoding'))
if code in codes:
code = '_' + str(nextCode)
nextCode += 1
codes[code] = name
self.features[code] = {"geometry": shapelyGeometry, "name": name, "code": code}
else:
        raise Exception, "Wrong geometry type: " + str(geometryType)
def convert(self, outputFile):
print 'Generating '+outputFile
self.loadData()
codes = self.features.keys()
main_codes = copy.copy(codes)
self.map.insets = []
envelope = []
for inset in self.insets:
insetBbox = self.renderMapInset(inset['codes'], inset['left'], inset['top'], inset['width'])
insetHeight = (insetBbox[3] - insetBbox[1]) * (inset['width'] / (insetBbox[2] - insetBbox[0]))
self.map.insets.append({
"bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
"left": inset['left'],
"top": inset['top'],
"width": inset['width'],
"height": insetHeight
})
envelope.append(
shapely.geometry.box(
inset['left'], inset['top'], inset['left'] + inset['width'], inset['top'] + insetHeight
)
)
for code in inset['codes']:
main_codes.remove(code)
insetBbox = self.renderMapInset(main_codes, 0, 0, self.width)
insetHeight = (insetBbox[3] - insetBbox[1]) * (self.width / (insetBbox[2] - insetBbox[0]))
envelope.append( shapely.geometry.box( 0, 0, self.width, insetHeight ) )
mapBbox = shapely.geometry.MultiPolygon( envelope ).bounds
self.map.width = mapBbox[2] - mapBbox[0]
self.map.height = mapBbox[3] - mapBbox[1]
self.map.insets.append({
"bbox": [{"x": insetBbox[0], "y": -insetBbox[3]}, {"x": insetBbox[2], "y": -insetBbox[1]}],
"left": 0,
"top": 0,
"width": self.width,
"height": insetHeight
})
self.map.projection = {"type": self.projection, "centralMeridian": float(self.longitude0)}
open(outputFile, 'w').write( self.map.getJSCode() )
if self.for_each is not None:
for code in codes:
childConfig = copy.deepcopy(self.for_each)
for param in ('input_file', 'output_file', 'where', 'name'):
childConfig[param] = childConfig[param].replace('{{code}}', code.lower())
converter = Converter(childConfig)
converter.convert(childConfig['output_file'])
def renderMapInset(self, codes, left, top, width):
envelope = []
for code in codes:
envelope.append( self.features[code]['geometry'].envelope )
bbox = shapely.geometry.MultiPolygon( envelope ).bounds
scale = (bbox[2]-bbox[0]) / width
# generate SVG paths
for code in codes:
feature = self.features[code]
geometry = feature['geometry']
if self.buffer_distance:
geometry = geometry.buffer(self.buffer_distance*scale, 1)
if geometry.is_empty:
continue
if self.simplify_tolerance:
geometry = geometry.simplify(self.simplify_tolerance*scale, preserve_topology=True)
if isinstance(geometry, shapely.geometry.multipolygon.MultiPolygon):
polygons = geometry.geoms
else:
polygons = [geometry]
path = ''
for polygon in polygons:
rings = []
rings.append(polygon.exterior)
rings.extend(polygon.interiors)
for ring in rings:
for pointIndex in range( len(ring.coords) ):
point = ring.coords[pointIndex]
if pointIndex == 0:
path += 'M'+str( round( (point[0]-bbox[0]) / scale + left, self.precision) )
path += ','+str( round( (bbox[3] - point[1]) / scale + top, self.precision) )
else:
path += 'l' + str( round(point[0]/scale - ring.coords[pointIndex-1][0]/scale, self.precision) )
path += ',' + str( round(ring.coords[pointIndex-1][1]/scale - point[1]/scale, self.precision) )
path += 'Z'
self.map.addPath(path, feature['code'], feature['name'])
return bbox
def applyFilters(self, geometry):
if self.viewportRect:
geometry = self.filterByViewport(geometry)
if not geometry:
return False
if self.minimal_area:
geometry = self.filterByMinimalArea(geometry)
if not geometry:
return False
return geometry
def filterByViewport(self, geometry):
try:
return geometry.intersection(self.viewportRect)
except shapely.geos.TopologicalError:
return False
def filterByMinimalArea(self, geometry):
if isinstance(geometry, shapely.geometry.multipolygon.MultiPolygon):
polygons = geometry.geoms
else:
polygons = [geometry]
polygons = filter(lambda p: p.area > self.minimal_area, polygons)
return shapely.geometry.multipolygon.MultiPolygon(polygons)
args = {}
if len(sys.argv) > 1:
paramsJson = open(sys.argv[1], 'r').read()
else:
paramsJson = sys.stdin.read()
paramsJson = json.loads(paramsJson)
converter = Converter(paramsJson)
converter.convert(paramsJson['output_file'])
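# Illustrative configuration sketch (all field values below are assumptions;
# only the option names come from Converter.__init__). The script expects a
# JSON document such as
#
#     {
#         "name": "world",
#         "projection": "mill",
#         "width": 900,
#         "input_file": "ne_110m_admin_0_countries.shp",
#         "name_field": "name",
#         "code_field": "iso_a2",
#         "output_file": "world-mill-en.js"
#     }
#
# passed either as a file path on the command line or piped in on stdin.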
| mit |
winndows/cinder | cinder/openstack/common/scheduler/filters/json_filter.py | 22 | 4914 | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_serialization import jsonutils
import six
from cinder.openstack.common.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
"""Host Filter to allow simple JSON-based grammar for
selecting hosts.
"""
def _op_compare(self, args, op):
"""Returns True if the specified operator can successfully
compare the first item in the args with all the rest. Will
return False if only one item is in the list.
"""
if len(args) < 2:
return False
if op is operator.contains:
bad = args[0] not in args[1:]
else:
bad = [arg for arg in args[1:]
if not op(args[0], arg)]
return not bool(bad)
def _equals(self, args):
"""First term is == all the other terms."""
return self._op_compare(args, operator.eq)
def _less_than(self, args):
"""First term is < all the other terms."""
return self._op_compare(args, operator.lt)
def _greater_than(self, args):
"""First term is > all the other terms."""
return self._op_compare(args, operator.gt)
def _in(self, args):
"""First term is in set of remaining terms."""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
"""First term is <= all the other terms."""
return self._op_compare(args, operator.le)
def _greater_than_equal(self, args):
"""First term is >= all the other terms."""
return self._op_compare(args, operator.ge)
def _not(self, args):
"""Flip each of the arguments."""
return [not arg for arg in args]
def _or(self, args):
"""True if any arg is True."""
return any(args)
def _and(self, args):
"""True if all args are True."""
return all(args)
commands = {
'=': _equals,
'<': _less_than,
'>': _greater_than,
'in': _in,
'<=': _less_than_equal,
'>=': _greater_than_equal,
'not': _not,
'or': _or,
'and': _and,
}
def _parse_string(self, string, host_state):
"""Strings prefixed with $ are capability lookups in the
form '$variable' where 'variable' is an attribute in the
HostState class. If $variable is a dictionary, you may
use: $variable.dictkey
"""
if not string:
return None
if not string.startswith("$"):
return string
path = string[1:].split(".")
obj = getattr(host_state, path[0], None)
if obj is None:
return None
for item in path[1:]:
obj = obj.get(item)
if obj is None:
return None
return obj
def _process_filter(self, query, host_state):
"""Recursively parse the query structure."""
if not query:
return True
cmd = query[0]
method = self.commands[cmd]
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host_state)
elif isinstance(arg, six.string_types):
arg = self._parse_string(arg, host_state)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can fulfill the requirements
specified in the query.
"""
# TODO(zhiteng) Add description for filter_properties structure
# and scheduler_hints.
try:
query = filter_properties['scheduler_hints']['query']
except KeyError:
query = None
if not query:
return True
# NOTE(comstud): Not checking capabilities or service for
# enabled/disabled so that a provided json filter can decide
result = self._process_filter(jsonutils.loads(query), host_state)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
# Filter it out.
return True
return False
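# Illustrative usage sketch (the host attribute name free_capacity_gb is an
# assumption for the example): a request can supply a JSON query via scheduler
# hints, e.g.
#
#     query = jsonutils.dumps(
#         ['and',
#          ['>=', '$free_capacity_gb', 100],
#          ['in', '$host', 'host1', 'host2']])
#     filter_properties = {'scheduler_hints': {'query': query}}
#
# host_passes() then evaluates the nested operator list against each HostState,
# resolving '$'-prefixed strings through _parse_string().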
| apache-2.0 |
cneill/barbican | barbican/queue/keystone_listener.py | 2 | 6732 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Server-side (i.e. worker side) Keystone notification related classes and logic.
"""
import oslo_messaging
from oslo_service import service
from barbican.common import utils
from barbican import queue
from barbican.tasks import keystone_consumer
LOG = utils.getLogger(__name__)
class NotificationTask(object):
"""Task which exposes the API for consuming priority based notifications.
The Oslo notification framework delivers notifications based on priority to
matching callback APIs as defined in its notification listener endpoint
list.
    Currently, from the Keystone perspective, the `info` API is sufficient, as
    Keystone sends notifications at `info` priority ONLY. Other priority-level
    APIs (warn, error, critical, audit, debug) are not needed here.
"""
def __init__(self, conf):
self.conf = conf
def info(self, ctxt, publisher_id, event_type, payload, metadata):
"""Receives notification at info level."""
return self.process_event(ctxt, publisher_id, event_type, payload,
metadata)
def process_event(self, ctxt, publisher_id, event_type, payload, metadata):
"""Process Keystone Event based on event_type and payload data.
        Parses the notification data to identify whether the event is a project
        deletion. For a delete-project event, it passes the project_id to the
        KeystoneEventConsumer logic for further processing. The Barbican service
        is not interested in other events, so in those cases it simply returns
        None as acknowledgment.
        The messaging server considers a message acknowledged when the return
        value is either `oslo_messaging.NotificationResult.HANDLED` or None.
        When a notification is processed successfully, the returned value is
        `oslo_messaging.NotificationResult.HANDLED`.
        When notification processing fails, the returned value is
        `oslo_messaging.NotificationResult.REQUEUE` if the transport supports
        this feature; otherwise `oslo_messaging.NotificationResult.HANDLED` is
        returned.
"""
LOG.debug("Input keystone event publisher_id = %s", publisher_id)
LOG.debug("Input keystone event payload = %s", payload)
LOG.debug("Input keystone event type = %s", event_type)
LOG.debug("Input keystone event metadata = %s", metadata)
project_id = self._parse_payload_for_project_id(payload)
resource_type, operation_type = self._parse_event_type(event_type)
LOG.debug('Keystone Event: resource type={0}, operation type={1}, '
'keystone id={2}'.format(resource_type, operation_type,
project_id))
if (project_id and resource_type == 'project' and
operation_type == 'deleted'):
task = keystone_consumer.KeystoneEventConsumer()
try:
task.process(project_id=project_id,
resource_type=resource_type,
operation_type=operation_type)
return oslo_messaging.NotificationResult.HANDLED
except Exception:
# No need to log message here as task process method has
# already logged it
# TODO(john-wood-w) This really should be retried on a
# schedule and really only if the database is down, not
# for any exception otherwise tasks will be re-queued
# repeatedly. Revisit as part of the retry task work later.
if self.conf.keystone_notifications.allow_requeue:
return oslo_messaging.NotificationResult.REQUEUE
else:
return oslo_messaging.NotificationResult.HANDLED
return None # in case event is not project delete
def _parse_event_type(self, event_type):
"""Parses event type provided as part of notification.
Parses to identify what operation is performed and on which Keystone
resource.
A few event type sample values are provided below::
identity.project.deleted
identity.role.created
identity.domain.updated
identity.authenticate
"""
resource_type = None
operation_type = None
if event_type:
type_list = event_type.split('.')
# 2 is min. number of dot delimiters expected in event_type value.
if len(type_list) > 2:
resource_type = type_list[-2].lower()
operation_type = type_list[-1].lower()
return resource_type, operation_type
def _parse_payload_for_project_id(self, payload_s):
"""Gets project resource identifier from payload
Sample payload is provided below::
{'resource_info': u'2b99a94ad02741978e613fb52dd1f4cd'}
"""
if payload_s:
return payload_s.get('resource_info')
class MessageServer(NotificationTask, service.Service):
"""Server to retrieve messages from queue used by Keystone.
This is used to send public notifications for openstack service
consumption.
This server is an Oslo notification server that exposes set of standard
APIs for events consumption based on event priority.
    Some of the messaging server configuration needs to match the Keystone
    deployment's notification configuration, e.g. the exchange name and topic name.
"""
def __init__(self, conf):
pool_size = conf.keystone_notifications.thread_pool_size
NotificationTask.__init__(self, conf)
service.Service.__init__(self, threads=pool_size)
self.target = queue.get_notification_target()
self._msg_server = queue.get_notification_server(targets=[self.target],
endpoints=[self])
def start(self):
self._msg_server.start()
super(MessageServer, self).start()
def stop(self):
super(MessageServer, self).stop()
self._msg_server.stop()
queue.cleanup()
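# Illustrative bootstrap sketch (assumes the Barbican configuration, including
# the keystone_notifications options, has already been registered and loaded;
# the real wiring is done by Barbican's service entry points):
#
#     from oslo_config import cfg
#
#     server = MessageServer(cfg.CONF)
#     server.start()
#     server.wait()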
| apache-2.0 |
emergebtc/muddery | evennia/evennia/players/players.py | 1 | 26439 | """
Typeclass for Player objects
Note that this object is primarily intended to
store OOC information, not game info! This
object represents the actual user (not their
character) and has NO actual precence in the
game world (this is handled by the associated
character object, so you should customize that
instead for most things).
"""
from django.conf import settings
from django.utils import timezone
from evennia.typeclasses.models import TypeclassBase
from evennia.players.manager import PlayerManager
from evennia.players.models import PlayerDB
from evennia.comms.models import ChannelDB
from evennia.commands import cmdhandler
from evennia.utils import logger
from evennia.utils.utils import (lazy_property, to_str,
make_iter, to_unicode,
variable_from_module)
from evennia.typeclasses.attributes import NickHandler
from evennia.scripts.scripthandler import ScriptHandler
from evennia.commands.cmdsethandler import CmdSetHandler
from django.utils.translation import ugettext as _
__all__ = ("DefaultPlayer",)
_SESSIONS = None
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_CMDSET_PLAYER = settings.CMDSET_PLAYER
_CONNECT_CHANNEL = None
class DefaultPlayer(PlayerDB):
"""
This is the base Typeclass for all Players. Players represent
the person playing the game and tracks account info, password
etc. They are OOC entities without presence in-game. A Player
can connect to a Character Object in order to "enter" the
game.
Player Typeclass API:
* Available properties (only available on initiated typeclass objects)
key (string) - name of player
name (string)- wrapper for user.username
aliases (list of strings) - aliases to the object. Will be saved to
database as AliasDB entries but returned as strings.
dbref (int, read-only) - unique #id-number. Also "id" can be used.
date_created (string) - time stamp of object creation
permissions (list of strings) - list of permission strings
user (User, read-only) - django User authorization object
obj (Object) - game object controlled by player. 'character' can also
be used.
sessions (list of Sessions) - sessions connected to this player
is_superuser (bool, read-only) - if the connected user is a superuser
* Handlers
locks - lock-handler: use locks.add() to add new lock strings
db - attribute-handler: store/retrieve database attributes on this
self.db.myattr=val, val=self.db.myattr
ndb - non-persistent attribute handler: same as db but does not
create a database entry when storing data
scripts - script-handler. Add new scripts to object with scripts.add()
cmdset - cmdset-handler. Use cmdset.add() to add new cmdsets to object
nicks - nick-handler. New nicks with nicks.add().
* Helper methods
msg(outgoing_string, from_obj=None, **kwargs)
#swap_character(new_character, delete_old_character=False)
execute_cmd(raw_string)
search(ostring, global_search=False, attribute_name=None,
use_nicks=False, location=None,
ignore_errors=False, player=False)
is_typeclass(typeclass, exact=False)
swap_typeclass(new_typeclass, clean_attributes=False, no_default=True)
access(accessing_obj, access_type='read', default=False, no_superuser_bypass=False)
check_permstring(permstring)
* Hook methods
basetype_setup()
at_player_creation()
- note that the following hooks are also found on Objects and are
usually handled on the character level:
at_init()
at_access()
at_cmdset_get(**kwargs)
at_first_login()
at_post_login(sessid=None)
at_disconnect()
at_message_receive()
at_message_send()
at_server_reload()
at_server_shutdown()
"""
__metaclass__ = TypeclassBase
objects = PlayerManager()
# properties
@lazy_property
def cmdset(self):
return CmdSetHandler(self, True)
@lazy_property
def scripts(self):
return ScriptHandler(self)
@lazy_property
def nicks(self):
return NickHandler(self)
# session-related methods
def get_session(self, sessid):
"""
Return session with given sessid connected to this player.
note that the sessionhandler also accepts sessid as an iterable.
"""
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.session_from_player(self, sessid)
def get_all_sessions(self):
"Return all sessions connected to this player"
global _SESSIONS
if not _SESSIONS:
from evennia.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.sessions_from_player(self)
sessions = property(get_all_sessions) # alias shortcut
def disconnect_session_from_player(self, sessid):
"""
Access method for disconnecting a given session from the player
(connection happens automatically in the sessionhandler)
"""
# this should only be one value, loop just to make sure to
# clean everything
sessions = (session for session in self.get_all_sessions()
if session.sessid == sessid)
for session in sessions:
# this will also trigger unpuppeting
session.sessionhandler.disconnect(session)
# puppeting operations
def puppet_object(self, sessid, obj):
"""
Use the given session to control (puppet) the given object (usually
a Character type).
Args:
sessid (int): session id of session to connect
obj (Object): the object to start puppeting
Raises:
RuntimeError with message if puppeting is not possible
returns True if successful, False otherwise
"""
# safety checks
if not obj:
raise RuntimeError("Object not found")
session = self.get_session(sessid)
if not session:
raise RuntimeError("Session not found")
if self.get_puppet(sessid) == obj:
# already puppeting this object
raise RuntimeError("You are already puppeting this object.")
if not obj.access(self, 'puppet'):
# no access
raise RuntimeError("You don't have permission to puppet '%s'." % obj.key)
if obj.player:
# object already puppeted
if obj.player == self:
if obj.sessid.count():
# we may take over another of our sessions
# output messages to the affected sessions
if _MULTISESSION_MODE in (1, 3):
txt1 = "{c%s{n{G is now shared from another of your sessions.{n"
txt2 = "Sharing {c%s{n with another of your sessions."
else:
txt1 = "{c%s{n{R is now acted from another of your sessions.{n"
txt2 = "Taking over {c%s{n from another of your sessions."
self.unpuppet_object(obj.sessid.get())
self.msg(txt1 % obj.name, sessid=obj.sessid.get(), _forced_nomulti=True)
self.msg(txt2 % obj.name, sessid=sessid, _forced_nomulti=True)
elif obj.player.is_connected:
# controlled by another player
                raise RuntimeError("{R{c%s{R is already puppeted by another Player." % obj.key)
# do the puppeting
if session.puppet:
# cleanly unpuppet eventual previous object puppeted by this session
self.unpuppet_object(sessid)
# if we get to this point the character is ready to puppet or it
# was left with a lingering player/sessid reference from an unclean
# server kill or similar
obj.at_pre_puppet(self, sessid=sessid)
# do the connection
obj.sessid.add(sessid)
obj.player = self
session.puid = obj.id
session.puppet = obj
# validate/start persistent scripts on object
obj.scripts.validate()
obj.at_post_puppet()
# re-cache locks to make sure superuser bypass is updated
obj.locks.cache_lock_bypass(obj)
def unpuppet_object(self, sessid):
"""
Disengage control over an object
Args:
sessid(int): the session id to disengage
Raises:
RuntimeError with message about error.
"""
if _MULTISESSION_MODE == 1:
sessions = self.get_all_sessions()
else:
sessions = self.get_session(sessid)
if not sessions:
raise RuntimeError("No session was found.")
for session in make_iter(sessions):
obj = session.puppet or None
if not obj:
raise RuntimeError("No puppet was found to disconnect from.")
elif obj:
# do the disconnect, but only if we are the last session to puppet
obj.at_pre_unpuppet()
obj.sessid.remove(session.sessid)
if not obj.sessid.count():
del obj.player
obj.at_post_unpuppet(self, sessid=sessid)
# Just to be sure we're always clear.
session.puppet = None
session.puid = None
def unpuppet_all(self):
"""
Disconnect all puppets. This is called by server
before a reset/shutdown.
"""
for session in (sess for sess in self.get_all_sessions() if sess.puppet):
self.unpuppet_object(session.sessid)
def get_puppet(self, sessid, return_dbobj=False):
"""
Get an object puppeted by this session through this player. This is
the main method for retrieving the puppeted object from the
player's end.
sessid - return character connected to this sessid,
"""
session = self.get_session(sessid)
if not session:
return None
if return_dbobj:
return session.puppet
return session.puppet and session.puppet or None
def get_all_puppets(self):
"""
Get all currently puppeted objects as a list.
"""
return list(set(session.puppet for session in self.get_all_sessions()
if session.puppet))
def __get_single_puppet(self):
"""
This is a legacy convenience link for users of
MULTISESSION_MODE 0 or 1. It will return
only the first puppet. For mode 2, this returns
a list of all characters.
"""
puppets = self.get_all_puppets()
if _MULTISESSION_MODE in (0, 1):
return puppets and puppets[0] or None
return puppets
character = property(__get_single_puppet)
puppet = property(__get_single_puppet)
# utility methods
def delete(self, *args, **kwargs):
"""
Deletes the player permanently.
"""
for session in self.get_all_sessions():
# unpuppeting all objects and disconnecting the user, if any
# sessions remain (should usually be handled from the
# deleting command)
try:
self.unpuppet_object(session.sessid)
except RuntimeError:
# no puppet to disconnect from
pass
session.sessionhandler.disconnect(session, reason=_("Player being deleted."))
self.scripts.stop()
self.attributes.clear()
self.nicks.clear()
self.aliases.clear()
super(PlayerDB, self).delete(*args, **kwargs)
## methods inherited from database model
def msg(self, text=None, from_obj=None, sessid=None, **kwargs):
"""
Evennia -> User
This is the main route for sending data back to the user from the
server.
Args:
text (str, optional): text data to send
from_obj (Object or Player, optional): object sending. If given,
its at_msg_send() hook will be called.
sessid (int or list, optional): session id or ids to receive this
send. If given, overrules MULTISESSION_MODE.
Notes:
All other keywords are passed on to the protocol.
"""
text = to_str(text, force_string=True) if text else ""
if from_obj:
# call hook
try:
from_obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
pass
# session relay
if sessid:
# this could still be an iterable if sessid is an iterable
sessions = self.get_session(sessid)
if sessions:
# this is a special instruction to ignore MULTISESSION_MODE
# and only relay to this given session.
kwargs["_nomulti"] = True
for session in make_iter(sessions):
session.msg(text=text, **kwargs)
return
# we only send to the first of any connected sessions - the sessionhandler
# will disperse this to the other sessions based on MULTISESSION_MODE.
sessions = self.get_all_sessions()
if sessions:
sessions[0].msg(text=text, **kwargs)
def execute_cmd(self, raw_string, sessid=None, **kwargs):
"""
Do something as this player. This method is never called normally,
but only when the player object itself is supposed to execute the
command. It takes player nicks into account, but not nicks of
eventual puppets.
raw_string - raw command input coming from the command line.
sessid - the optional session id to be responsible for the command-send
**kwargs - other keyword arguments will be added to the found command
                   object instance as variables before it executes. This is
unused by default Evennia but may be used to set flags and
                   change operating parameters for commands at run-time.
"""
raw_string = to_unicode(raw_string)
raw_string = self.nicks.nickreplace(raw_string,
categories=("inputline", "channel"), include_player=False)
if not sessid and _MULTISESSION_MODE in (0, 1):
# in this case, we should either have only one sessid, or the sessid
# should not matter (since the return goes to all of them we can
# just use the first one as the source)
try:
sessid = self.get_all_sessions()[0].sessid
except IndexError:
# this can happen for bots
sessid = None
return cmdhandler.cmdhandler(self, raw_string,
callertype="player", sessid=sessid, **kwargs)
def search(self, searchdata, return_puppet=False,
nofound_string=None, multimatch_string=None, **kwargs):
"""
This is similar to the ObjectDB search method but will search for
Players only. Errors will be echoed, and None returned if no Player
is found.
searchdata - search criterion, the Player's key or dbref to search for
return_puppet - will try to return the object the player controls
instead of the Player object itself. If no
puppeted object exists (since Player is OOC), None will
be returned.
nofound_string - optional custom string for not-found error message.
multimatch_string - optional custom string for multimatch error header.
        Extra keywords are ignored, but are allowed in the call in order to make
API more consistent with objects.models.TypedObject.search.
"""
# handle me, self and *me, *self
if isinstance(searchdata, basestring):
# handle wrapping of common terms
if searchdata.lower() in ("me", "*me", "self", "*self",):
return self
matches = self.__class__.objects.player_search(searchdata)
matches = _AT_SEARCH_RESULT(self, searchdata, matches, global_search=True,
nofound_string=nofound_string,
multimatch_string=multimatch_string)
if matches and return_puppet:
try:
return matches.puppet
except AttributeError:
return None
return matches
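    # Illustrative only (the player name below is a placeholder): from player
    # code one could locate another account and message it roughly as
    #
    #     target = self.search("Anna")
    #     if target:
    #         target.msg("Hello from afar!")
    #
    # With return_puppet=True the puppeted Character (or None) would be
    # returned instead of the Player object.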
def access(self, accessing_obj, access_type='read', default=False, no_superuser_bypass=False, **kwargs):
"""
Determines if another object has permission to access this object
in whatever way.
Args:
accessing_obj (Object): Object trying to access this one
access_type (str, optional): Type of access sought
default (bool, optional): What to return if no lock of access_type was found
no_superuser_bypass (bool, optional): Turn off superuser
lock bypassing. Be careful with this one.
Kwargs:
Passed to the at_access hook along with the result.
Returns:
result (bool): Result of access check.
"""
result = super(DefaultPlayer, self).access(accessing_obj, access_type=access_type,
default=default, no_superuser_bypass=no_superuser_bypass)
self.at_access(result, accessing_obj, access_type, **kwargs)
return result
## player hooks
def basetype_setup(self):
"""
This sets up the basic properties for a player.
Overload this with at_player_creation rather than
changing this method.
"""
# A basic security setup
lockstring = "examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:all()"
self.locks.add(lockstring)
# The ooc player cmdset
self.cmdset.add_default(_CMDSET_PLAYER, permanent=True)
def at_player_creation(self):
"""
This is called once, the very first time
the player is created (i.e. first time they
register with the game). It's a good place
to store attributes all players should have,
like configuration values etc.
"""
# set an (empty) attribute holding the characters this player has
lockstring = "attrread:perm(Admins);attredit:perm(Admins);attrcreate:perm(Admins)"
self.attributes.add("_playable_characters", [], lockstring=lockstring)
def at_init(self):
"""
This is always called whenever this object is initiated --
that is, whenever it its typeclass is cached from memory. This
happens on-demand first time the object is used or activated
in some way after being created but also after each server
restart or reload. In the case of player objects, this usually
happens the moment the player logs in or reconnects after a
reload.
"""
pass
# Note that the hooks below also exist in the character object's
# typeclass. You can often ignore these and rely on the character
# ones instead, unless you are implementing a multi-character game
# and have some things that should be done regardless of which
# character is currently connected to this player.
def at_first_save(self):
"""
This is a generic hook called by Evennia when this object is
saved to the database the very first time. You generally
don't override this method but the hooks called by it.
"""
self.basetype_setup()
self.at_player_creation()
permissions = settings.PERMISSION_PLAYER_DEFAULT
if hasattr(self, "_createdict"):
# this will only be set if the utils.create_player
# function was used to create the object.
cdict = self._createdict
if cdict.get("locks"):
self.locks.add(cdict["locks"])
if cdict.get("permissions"):
permissions = cdict["permissions"]
del self._createdict
self.permissions.add(permissions)
def at_access(self, result, accessing_obj, access_type, **kwargs):
"""
This is called with the result of an access call, along with
any kwargs used for that call. The return of this method does
not affect the result of the lock check. It can be used e.g. to
customize error messages in a central location or other effects
based on the access result.
"""
pass
def at_cmdset_get(self, **kwargs):
"""
Called just before cmdsets on this player are requested by the
command handler. If changes need to be done on the fly to the
cmdset before passing them on to the cmdhandler, this is the
place to do it. This is called also if the player currently
        has no cmdsets. kwargs are usually not used unless the
cmdset is generated dynamically.
"""
pass
def at_first_login(self):
"""
Called the very first time this player logs into the game.
"""
pass
def at_pre_login(self):
"""
Called every time the user logs in, just before the actual
login-state is set.
"""
pass
def _send_to_connect_channel(self, message):
"Helper method for loading the default comm channel"
global _CONNECT_CHANNEL
if not _CONNECT_CHANNEL:
try:
_CONNECT_CHANNEL = ChannelDB.objects.filter(db_key=settings.DEFAULT_CHANNELS[1]["key"])[0]
except Exception:
logger.log_trace()
now = timezone.now()
now = "%02i-%02i-%02i(%02i:%02i)" % (now.year, now.month,
now.day, now.hour, now.minute)
if _CONNECT_CHANNEL:
_CONNECT_CHANNEL.tempmsg("[%s, %s]: %s" % (_CONNECT_CHANNEL.key, now, message))
else:
logger.log_infomsg("[%s]: %s" % (now, message))
def at_post_login(self, sessid=None):
"""
Called at the end of the login process, just before letting
the player loose. This is called before an eventual Character's
at_post_login hook.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
if _MULTISESSION_MODE == 0:
# in this mode we should have only one character available. We
            # try to auto-connect to our last connected object, if any
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE == 1:
# in this mode all sessions connect to the same puppet.
self.puppet_object(sessid, self.db._last_puppet)
elif _MULTISESSION_MODE in (2, 3):
# In this mode we by default end up at a character selection
# screen. We execute look on the player.
self.execute_cmd("look", sessid=sessid)
def at_disconnect(self, reason=None):
"""
Called just before user is disconnected.
"""
reason = reason and "(%s)" % reason or ""
self._send_to_connect_channel("{R%s disconnected %s{n" % (self.key, reason))
def at_post_disconnect(self):
"""
This is called after disconnection is complete. No messages
can be relayed to the player from here. After this call, the
player should not be accessed any more, making this a good
spot for deleting it (in the case of a guest player account,
for example).
"""
pass
def at_message_receive(self, message, from_obj=None):
"""
Called when any text is emitted to this
object. If it returns False, no text
will be sent automatically.
"""
return True
def at_message_send(self, message, to_object):
"""
Called whenever this object tries to send text
to another object. Only called if the object supplied
itself as a sender in the msg() call.
"""
pass
def at_server_reload(self):
"""
This hook is called whenever the server is shutting down for
restart/reboot. If you want to, for example, save non-persistent
properties across a restart, this is the place to do it.
"""
pass
def at_server_shutdown(self):
"""
This hook is called whenever the server is shutting down fully
(i.e. not for a restart).
"""
pass
class DefaultGuest(DefaultPlayer):
"""
This class is used for guest logins. Unlike Players, Guests and their
characters are deleted after disconnection.
"""
def at_post_login(self, sessid=None):
"""
In theory, guests only have one character regardless of which
MULTISESSION_MODE we're in. They don't get a choice.
"""
self._send_to_connect_channel("{G%s connected{n" % self.key)
self.puppet_object(sessid, self.db._last_puppet)
def at_disconnect(self):
"""
A Guest's characters aren't meant to linger on the server. When a
Guest disconnects, we remove its character.
"""
super(DefaultGuest, self).at_disconnect()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_server_shutdown(self):
"""
We repeat at_disconnect() here just to be on the safe side.
"""
super(DefaultGuest, self).at_server_shutdown()
characters = self.db._playable_characters
for character in filter(None, characters):
character.delete()
def at_post_disconnect(self):
"""
Guests aren't meant to linger on the server, either. We need to wait
until after the Guest disconnects to delete it, though.
"""
super(DefaultGuest, self).at_post_disconnect()
self.delete()
| bsd-3-clause |
bhamza/ntu-dsi-dcn | src/core/bindings/modulegen_customizations.py | 19 | 20774 | import re
import os
import sys
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen import cppclass, param, retval
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
# class SmartPointerTransformation(typehandlers.TypeTransformation):
# """
# This class provides a "type transformation" that tends to support
# NS-3 smart pointers. Parameters such as "Ptr<Foo> foo" are
# transformed into something like Parameter.new("Foo*", "foo",
# transfer_ownership=False). Return values such as Ptr<Foo> are
# transformed into ReturnValue.new("Foo*",
# caller_owns_return=False). Since the underlying objects have
# reference counting, PyBindGen does the right thing.
# """
# def __init__(self):
# super(SmartPointerTransformation, self).__init__()
# self.rx = re.compile(r'(ns3::|::ns3::|)Ptr<([^>]+)>\s*$')
# def _get_untransformed_type_traits(self, name):
# m = self.rx.match(name)
# is_const = False
# if m is None:
# return None, False
# else:
# name1 = m.group(2).strip()
# if name1.startswith('const '):
# name1 = name1[len('const '):]
# is_const = True
# if name1.endswith(' const'):
# name1 = name1[:-len(' const')]
# is_const = True
# new_name = name1+' *'
# if new_name.startswith('::'):
# new_name = new_name[2:]
# return new_name, is_const
# def get_untransformed_name(self, name):
# new_name, dummy_is_const = self._get_untransformed_type_traits(name)
# return new_name
# def create_type_handler(self, type_handler, *args, **kwargs):
# if issubclass(type_handler, Parameter):
# kwargs['transfer_ownership'] = False
# elif issubclass(type_handler, ReturnValue):
# kwargs['caller_owns_return'] = False
# else:
# raise AssertionError
# ## fix the ctype, add ns3:: namespace
# orig_ctype, is_const = self._get_untransformed_type_traits(args[0])
# if is_const:
# correct_ctype = 'ns3::Ptr< %s const >' % orig_ctype[:-2]
# else:
# correct_ctype = 'ns3::Ptr< %s >' % orig_ctype[:-2]
# args = tuple([correct_ctype] + list(args[1:]))
# handler = type_handler(*args, **kwargs)
# handler.set_tranformation(self, orig_ctype)
# return handler
# def untransform(self, type_handler, declarations, code_block, expression):
# return 'const_cast<%s> (ns3::PeekPointer (%s))' % (type_handler.untransformed_ctype, expression)
# def transform(self, type_handler, declarations, code_block, expression):
# assert type_handler.untransformed_ctype[-1] == '*'
# return 'ns3::Ptr< %s > (%s)' % (type_handler.untransformed_ctype[:-1], expression)
# ## register the type transformation
# transf = SmartPointerTransformation()
# typehandlers.return_type_matcher.register_transformation(transf)
# typehandlers.param_type_matcher.register_transformation(transf)
# del transf
class ArgvParam(Parameter):
"""
Converts a python list-of-strings argument to a pair of 'int argc,
char *argv[]' arguments to pass into C.
One Python argument becomes two C function arguments -> it's a miracle!
Note: this parameter type handler is not registered by any name;
must be used explicitly.
"""
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = []
def convert_c_to_python(self, wrapper):
raise NotImplementedError
def convert_python_to_c(self, wrapper):
py_name = wrapper.declarations.declare_variable('PyObject*', 'py_' + self.name)
argc_var = wrapper.declarations.declare_variable('int', 'argc')
name = wrapper.declarations.declare_variable('char**', self.name)
idx = wrapper.declarations.declare_variable('Py_ssize_t', 'idx')
wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_name], self.name)
#wrapper.before_call.write_error_check('!PyList_Check(%s)' % py_name) # XXX
wrapper.before_call.write_code("%s = (char **) malloc(sizeof(char*)*PyList_Size(%s));"
% (name, py_name))
wrapper.before_call.add_cleanup_code('free(%s);' % name)
wrapper.before_call.write_code('''
for (%(idx)s = 0; %(idx)s < PyList_Size(%(py_name)s); %(idx)s++)
{
''' % vars())
wrapper.before_call.sink.indent()
wrapper.before_call.write_code('''
PyObject *item = PyList_GET_ITEM(%(py_name)s, %(idx)s);
''' % vars())
#wrapper.before_call.write_error_check('item == NULL')
wrapper.before_call.write_error_check(
'!PyString_Check(item)',
failure_cleanup=('PyErr_SetString(PyExc_TypeError, '
'"argument %s must be a list of strings");') % self.name)
wrapper.before_call.write_code(
'%s[%s] = PyString_AsString(item);' % (name, idx))
wrapper.before_call.sink.unindent()
wrapper.before_call.write_code('}')
wrapper.before_call.write_code('%s = PyList_Size(%s);' % (argc_var, py_name))
wrapper.call_params.append(argc_var)
wrapper.call_params.append(name)
# class CallbackImplProxyMethod(typehandlers.ReverseWrapperBase):
# """
# Class that generates a proxy virtual method that calls a similarly named python method.
# """
# def __init__(self, return_value, parameters):
# super(CallbackImplProxyMethod, self).__init__(return_value, parameters)
# def generate_python_call(self):
# """code to call the python method"""
# build_params = self.build_params.get_parameters(force_tuple_creation=True)
# if build_params[0][0] == '"':
# build_params[0] = '(char *) ' + build_params[0]
# args = self.before_call.declare_variable('PyObject*', 'args')
# self.before_call.write_code('%s = Py_BuildValue(%s);'
# % (args, ', '.join(build_params)))
# self.before_call.add_cleanup_code('Py_DECREF(%s);' % args)
# self.before_call.write_code('py_retval = PyObject_CallObject(m_callback, %s);' % args)
# self.before_call.write_error_check('py_retval == NULL')
# self.before_call.add_cleanup_code('Py_DECREF(py_retval);')
# def generate_callback_classes(out, callbacks):
# for callback_impl_num, template_parameters in enumerate(callbacks):
# sink = MemoryCodeSink()
# cls_name = "ns3::Callback< %s >" % ', '.join(template_parameters)
# #print >> sys.stderr, "***** trying to register callback: %r" % cls_name
# class_name = "PythonCallbackImpl%i" % callback_impl_num
# sink.writeln('''
# class %s : public ns3::CallbackImpl<%s>
# {
# public:
# PyObject *m_callback;
# %s(PyObject *callback)
# {
# Py_INCREF(callback);
# m_callback = callback;
# }
# virtual ~%s()
# {
# Py_DECREF(m_callback);
# m_callback = NULL;
# }
# virtual bool IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other_base) const
# {
# const %s *other = dynamic_cast<const %s*> (ns3::PeekPointer (other_base));
# if (other != NULL)
# return (other->m_callback == m_callback);
# else
# return false;
# }
# ''' % (class_name, ', '.join(template_parameters), class_name, class_name, class_name, class_name))
# sink.indent()
# callback_return = template_parameters[0]
# return_ctype = ctypeparser.parse_type(callback_return)
# if ('const' in return_ctype.remove_modifiers()):
# kwargs = {'is_const': True}
# else:
# kwargs = {}
# try:
# return_type = ReturnValue.new(str(return_ctype), **kwargs)
# except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
# warnings.warn("***** Unable to register callback; Return value '%s' error (used in %s): %r"
# % (callback_return, cls_name, ex),
# Warning)
# continue
# arguments = []
# ok = True
# callback_parameters = [arg for arg in template_parameters[1:] if arg != 'ns3::empty']
# for arg_num, arg_type in enumerate(callback_parameters):
# arg_name = 'arg%i' % (arg_num+1)
# param_ctype = ctypeparser.parse_type(arg_type)
# if ('const' in param_ctype.remove_modifiers()):
# kwargs = {'is_const': True}
# else:
# kwargs = {}
# try:
# arguments.append(Parameter.new(str(param_ctype), arg_name, **kwargs))
# except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
# warnings.warn("***** Unable to register callback; parameter '%s %s' error (used in %s): %r"
# % (arg_type, arg_name, cls_name, ex),
# Warning)
# ok = False
# if not ok:
# continue
# wrapper = CallbackImplProxyMethod(return_type, arguments)
# wrapper.generate(sink, 'operator()', decl_modifiers=[])
# sink.unindent()
# sink.writeln('};\n')
# sink.flush_to(out)
# class PythonCallbackParameter(Parameter):
# "Class handlers"
# CTYPES = [cls_name]
# #print >> sys.stderr, "***** registering callback handler: %r" % ctypeparser.normalize_type_string(cls_name)
# DIRECTIONS = [Parameter.DIRECTION_IN]
# PYTHON_CALLBACK_IMPL_NAME = class_name
# TEMPLATE_ARGS = template_parameters
# def convert_python_to_c(self, wrapper):
# "parses python args to get C++ value"
# assert isinstance(wrapper, typehandlers.ForwardWrapperBase)
# if self.default_value is None:
# py_callback = wrapper.declarations.declare_variable('PyObject*', self.name)
# wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name)
# wrapper.before_call.write_error_check(
# '!PyCallable_Check(%s)' % py_callback,
#                     'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
# callback_impl = wrapper.declarations.declare_variable(
# 'ns3::Ptr<%s>' % self.PYTHON_CALLBACK_IMPL_NAME,
# '%s_cb_impl' % self.name)
# wrapper.before_call.write_code("%s = ns3::Create<%s> (%s);"
# % (callback_impl, self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
# wrapper.call_params.append(
# 'ns3::Callback<%s> (%s)' % (', '.join(self.TEMPLATE_ARGS), callback_impl))
# else:
# py_callback = wrapper.declarations.declare_variable('PyObject*', self.name, 'NULL')
# wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name, optional=True)
# value = wrapper.declarations.declare_variable(
# 'ns3::Callback<%s>' % ', '.join(self.TEMPLATE_ARGS),
# self.name+'_value',
# self.default_value)
# wrapper.before_call.write_code("if (%s) {" % (py_callback,))
# wrapper.before_call.indent()
# wrapper.before_call.write_error_check(
# '!PyCallable_Check(%s)' % py_callback,
#                     'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
# wrapper.before_call.write_code("%s = ns3::Callback<%s> (ns3::Create<%s> (%s));"
# % (value, ', '.join(self.TEMPLATE_ARGS),
# self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
# wrapper.before_call.unindent()
# wrapper.before_call.write_code("}") # closes: if (py_callback) {
# wrapper.call_params.append(value)
# def convert_c_to_python(self, wrapper):
# raise typehandlers.NotSupportedError("Reverse wrappers for ns3::Callback<...> types "
# "(python using callbacks defined in C++) not implemented.")
# def write_preamble(out):
# pybindgen.write_preamble(out)
# out.writeln("#include \"ns3/everything.h\"")
def Simulator_customizations(module):
Simulator = module['ns3::Simulator']
## Simulator::Schedule(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("Schedule", "_wrap_Simulator_Schedule",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleNow(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleNow", "_wrap_Simulator_ScheduleNow",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleDestroy(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleDestroy", "_wrap_Simulator_ScheduleDestroy",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
Simulator.add_custom_method_wrapper("Run", "_wrap_Simulator_Run",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
CommandLine = module['ns3::CommandLine']
CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
is_static=False)
CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
flags=["METH_VARARGS", "METH_KEYWORDS"])
# def Object_customizations(module):
# ## ---------------------------------------------------------------------
# ## Here we generate custom constructor code for all classes that
# ## derive from ns3::Object. The custom constructors are needed in
# ## order to support kwargs only and to translate kwargs into ns3
# ## attributes, etc.
# ## ---------------------------------------------------------------------
# Object = module['ns3::Object']
#     ## add a GetTypeId method to all generated helper classes
# def helper_class_hook(helper_class):
# decl = """
# static ns3::TypeId GetTypeId (void)
# {
# static ns3::TypeId tid = ns3::TypeId ("%s")
# .SetParent< %s > ()
# ;
# return tid;
# }""" % (helper_class.name, helper_class.class_.full_name)
# helper_class.add_custom_method(decl)
# helper_class.add_post_generation_code(
# "NS_OBJECT_ENSURE_REGISTERED (%s);" % helper_class.name)
# Object.add_helper_class_hook(helper_class_hook)
# def ns3_object_instance_creation_function(cpp_class, code_block, lvalue,
# parameters, construct_type_name):
# assert lvalue
# assert not lvalue.startswith('None')
# if cpp_class.cannot_be_constructed:
#             raise CodeGenerationError("%s cannot be constructed (%s)"
#                                       % (cpp_class.full_name, cpp_class.cannot_be_constructed))
# if cpp_class.incomplete_type:
# raise CodeGenerationError("%s cannot be constructed (incomplete type)"
# % cpp_class.full_name)
# code_block.write_code("%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
# code_block.write_code("%s->Ref ();" % (lvalue))
# def ns3_object_post_instance_creation_function(cpp_class, code_block, lvalue,
# parameters, construct_type_name):
# code_block.write_code("ns3::CompleteConstruct(%s);" % (lvalue, ))
# Object.set_instance_creation_function(ns3_object_instance_creation_function)
# Object.set_post_instance_creation_function(ns3_object_post_instance_creation_function)
# def Attribute_customizations(module):
# # Fix up for the "const AttributeValue &v = EmptyAttribute()"
# # case, as used extensively by helper classes.
# # Here's why we need to do this: pybindgen.gccxmlscanner, when
# # scanning parameter default values, is only provided with the
# # value as a simple C expression string. (py)gccxml does not
# # report the type of the default value.
# # As a workaround, here we iterate over all parameters of all
# # methods of all classes and tell pybindgen what is the type of
# # the default value for attributes.
# for cls in module.classes:
# for meth in cls.get_all_methods():
# for param in meth.parameters:
# if isinstance(param, cppclass.CppClassRefParameter):
# if param.cpp_class.name == 'AttributeValue' \
# and param.default_value is not None \
# and param.default_value_type is None:
# param.default_value_type = 'ns3::EmptyAttributeValue'
def TypeId_customizations(module):
TypeId = module['ns3::TypeId']
TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed("abstract base class")
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmode', [
('app', 'std::ios_base::app'),
('ate', 'std::ios_base::ate'),
('binary', 'std::ios_base::binary'),
('in', 'std::ios_base::in'),
('out', 'std::ios_base::out'),
('trunc', 'std::ios_base::trunc'),
])
ofstream.add_constructor([Parameter.new("const char *", 'filename'),
Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
ofstream.add_method('close', None, [])
import pybindgen.typehandlers.base
for alias in "std::_Ios_Openmode", "std::ios::openmode":
pybindgen.typehandlers.base.param_type_matcher.add_type_alias(alias, "int")
for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
% (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
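    # With the tp_hash slot installed, Ipv4Address objects can be used as dict
    # keys or set members from Python, e.g. (illustrative only):
    #
    #   addr_to_node = {ns.network.Ipv4Address("10.1.1.1"): node}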
def post_register_types(root_module):
Simulator_customizations(root_module)
CommandLine_customizations(root_module)
TypeId_customizations(root_module)
add_std_ofstream(root_module)
enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')
if 'Threading' not in enabled_features:
for clsname in ['SystemThread', 'SystemMutex', 'SystemCondition', 'CriticalSection',
'SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
if 'RealTime' not in enabled_features:
for clsname in ['WallClockSynchronizer', 'RealtimeSimulatorImpl']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
root_module.enums.remove(root_module['ns3::RealtimeSimulatorImpl::SynchronizationMode'])
# these are already in the main script, so commented out here
# Object_customizations(root_module)
# Attribute_customizations(root_module)
#def post_register_functions(root_module):
# pass
| gpl-2.0 |
sqlfocus/linux | tools/perf/python/twatch.py | 625 | 2726 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main(context_switch = 0, thread = -1):
cpus = perf.cpu_map()
threads = perf.thread_map(thread)
evsel = perf.evsel(type = perf.TYPE_SOFTWARE,
config = perf.COUNT_SW_DUMMY,
task = 1, comm = 1, mmap = 0, freq = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1, context_switch = context_switch,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
"""What we want are just the PERF_RECORD_ lifetime events for threads,
using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
    (the default), makes perf reenable irq_vectors:local_timer_entry when
    disabling nohz, which is not good for use cases where all we want is to see
    threads come and go... So use (perf.TYPE_SOFTWARE, perf.COUNT_SW_DUMMY,
freq=0) instead."""
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
"""
To test the PERF_RECORD_SWITCH record, pick a pid and replace
in the following line.
Example output:
cpu: 3, pid: 31463, tid: 31593 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31593, switch_out: 1 }
cpu: 1, pid: 31463, tid: 31489 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31489, switch_out: 1 }
cpu: 2, pid: 31463, tid: 31496 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31496, switch_out: 1 }
cpu: 3, pid: 31463, tid: 31491 { type: context_switch, next_prev_pid: 31463, next_prev_tid: 31491, switch_out: 0 }
It is possible as well to use event.misc & perf.PERF_RECORD_MISC_SWITCH_OUT
to figure out if this is a context switch in or out of the monitored threads.
If bored, please add command line option parsing support for these options :-)
"""
# main(context_switch = 1, thread = 31463)
main()
| gpl-2.0 |
inares/edx-platform | openedx/core/djangoapps/user_api/preferences/api.py | 48 | 17601 | """
API for managing user preferences.
"""
import logging
import analytics
from eventtracking import tracker
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from student.models import User, UserProfile
from request_cache import get_request_or_stub
from ..errors import (
UserAPIInternalError, UserAPIRequestError, UserNotFound, UserNotAuthorized,
PreferenceValidationError, PreferenceUpdateError
)
from ..helpers import intercept_errors
from ..models import UserOrgTag, UserPreference
from ..serializers import UserSerializer, RawUserPreferenceSerializer
log = logging.getLogger(__name__)
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def get_user_preference(requesting_user, preference_key, username=None):
"""Returns the value of the user preference with the specified key.
Args:
requesting_user (User): The user requesting the user preferences. Only the user with username
`username` or users with "is_staff" privileges can access the preferences.
preference_key (str): The key for the user preference.
username (str): Optional username for which to look up the preferences. If not specified,
`requesting_user.username` is assumed.
Returns:
The value for the user preference which is always a string, or None if a preference
has not been specified.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to the user preference.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username, allow_staff=True)
return UserPreference.get_value(existing_user, preference_key)
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def get_user_preferences(requesting_user, username=None):
"""Returns all user preferences as a JSON response.
Args:
requesting_user (User): The user requesting the user preferences. Only the user with username
`username` or users with "is_staff" privileges can access the preferences.
username (str): Optional username for which to look up the preferences. If not specified,
`requesting_user.username` is assumed.
Returns:
A dict containing account fields.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to the user preference.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username, allow_staff=True)
# Django Rest Framework V3 uses the current request to version
# hyperlinked URLS, so we need to retrieve the request and pass
# it in the serializer's context (otherwise we get an AssertionError).
# We're retrieving the request from the cache rather than passing it in
# as an argument because this is an implementation detail of how we're
# serializing data, which we want to encapsulate in the API call.
context = {
"request": get_request_or_stub()
}
user_serializer = UserSerializer(existing_user, context=context)
return user_serializer.data["preferences"]
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def update_user_preferences(requesting_user, update, user=None):
"""Update the user preferences for the given user.
Note:
It is up to the caller of this method to enforce the contract that this method is only called
with the user who made the request.
Arguments:
requesting_user (User): The user requesting to modify account information. Only the user with username
'username' has permissions to modify account information.
update (dict): The updated account field values.
Some notes:
Values are expected to be strings. Non-string values will be converted to strings.
Null values for a preference will be treated as a request to delete the key in question.
user (str/User): Optional, either username string or user object specifying which account should be updated.
If not specified, `requesting_user.username` is assumed.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to change the account
associated with `username`
PreferenceValidationError: the update was not attempted because validation errors were found
PreferenceUpdateError: the operation failed when performing the update.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
if not user or isinstance(user, basestring):
user = _get_authorized_user(requesting_user, user)
else:
_check_authorized(requesting_user, user.username)
# First validate each preference setting
errors = {}
serializers = {}
for preference_key in update.keys():
preference_value = update[preference_key]
if preference_value is not None:
try:
serializer = create_user_preference_serializer(user, preference_key, preference_value)
validate_user_preference_serializer(serializer, preference_key, preference_value)
serializers[preference_key] = serializer
except PreferenceValidationError as error:
preference_error = error.preference_errors[preference_key]
errors[preference_key] = {
"developer_message": preference_error["developer_message"],
"user_message": preference_error["user_message"],
}
if errors:
raise PreferenceValidationError(errors)
# Then perform the patch
for preference_key in update.keys():
preference_value = update[preference_key]
if preference_value is not None:
try:
serializer = serializers[preference_key]
serializer.save()
except Exception as error:
raise _create_preference_update_error(preference_key, preference_value, error)
else:
delete_user_preference(requesting_user, preference_key)
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def set_user_preference(requesting_user, preference_key, preference_value, username=None):
"""Update a user preference for the given username.
Note:
It is up to the caller of this method to enforce the contract that this method is only called
with the user who made the request.
Arguments:
requesting_user (User): The user requesting to modify account information. Only the user with username
'username' has permissions to modify account information.
preference_key (str): The key for the user preference.
preference_value (str): The value to be stored. Non-string values will be converted to strings.
username (str): Optional username specifying which account should be updated. If not specified,
`requesting_user.username` is assumed.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to change the account
associated with `username`
PreferenceValidationError: the update was not attempted because validation errors were found
PreferenceUpdateError: the operation failed when performing the update.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username)
serializer = create_user_preference_serializer(existing_user, preference_key, preference_value)
validate_user_preference_serializer(serializer, preference_key, preference_value)
try:
serializer.save()
except Exception as error:
raise _create_preference_update_error(preference_key, preference_value, error)
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def delete_user_preference(requesting_user, preference_key, username=None):
"""Deletes a user preference on behalf of a requesting user.
Note:
It is up to the caller of this method to enforce the contract that this method is only called
with the user who made the request.
Arguments:
requesting_user (User): The user requesting to delete the preference. Only the user with username
'username' has permissions to delete their own preference.
preference_key (str): The key for the user preference.
username (str): Optional username specifying which account should be updated. If not specified,
`requesting_user.username` is assumed.
Returns:
True if the preference was deleted, False if the user did not have a preference with the supplied key.
Raises:
UserNotFound: no user with username `username` exists (or `requesting_user.username` if
`username` is not specified)
UserNotAuthorized: the requesting_user does not have access to change the account
associated with `username`
PreferenceUpdateError: the operation failed when performing the update.
UserAPIInternalError: the operation failed due to an unexpected error.
"""
existing_user = _get_authorized_user(requesting_user, username)
try:
user_preference = UserPreference.objects.get(user=existing_user, key=preference_key)
except ObjectDoesNotExist:
return False
try:
user_preference.delete()
except Exception as error:
raise PreferenceUpdateError(
developer_message=u"Delete failed for user preference '{preference_key}': {error}".format(
preference_key=preference_key, error=error
),
user_message=_(u"Delete failed for user preference '{preference_key}'.").format(
preference_key=preference_key
),
)
return True
@intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError])
def update_email_opt_in(user, org, opt_in):
"""Updates a user's preference for receiving org-wide emails.
Sets a User Org Tag defining the choice to opt in or opt out of organization-wide
emails.
Arguments:
user (User): The user to set a preference for.
org (str): The org is used to determine the organization this setting is related to.
opt_in (bool): True if the user is choosing to receive emails for this organization.
If the user requires parental consent then email-optin is set to False regardless.
Returns:
None
Raises:
UserNotFound: no user profile exists for the specified user.
"""
preference, _ = UserOrgTag.objects.get_or_create(
user=user, org=org, key='email-optin'
)
# If the user requires parental consent, then don't allow opt-in
try:
user_profile = UserProfile.objects.get(user=user)
except ObjectDoesNotExist:
raise UserNotFound()
if user_profile.requires_parental_consent(
age_limit=getattr(settings, 'EMAIL_OPTIN_MINIMUM_AGE', 13),
default_requires_consent=False,
):
opt_in = False
# Update the preference and save it
preference.value = str(opt_in)
try:
preference.save()
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
_track_update_email_opt_in(user.id, org, opt_in)
except IntegrityError as err:
log.warn(u"Could not update organization wide preference due to IntegrityError: {}".format(err.message))
def _track_update_email_opt_in(user_id, organization, opt_in):
"""Track an email opt-in preference change.
Arguments:
user_id (str): The ID of the user making the preference change.
organization (str): The organization whose emails are being opted into or out of by the user.
opt_in (bool): Whether the user has chosen to opt-in to emails from the organization.
Returns:
None
"""
event_name = 'edx.bi.user.org_email.opted_in' if opt_in else 'edx.bi.user.org_email.opted_out'
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': 'communication',
'label': organization
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
def _get_authorized_user(requesting_user, username=None, allow_staff=False):
"""
Helper method to return the authorized user for a given username.
If username is not provided, requesting_user.username is assumed.
"""
if username is None:
username = requesting_user.username
try:
existing_user = User.objects.get(username=username)
except ObjectDoesNotExist:
raise UserNotFound()
_check_authorized(requesting_user, username, allow_staff)
return existing_user
def _check_authorized(requesting_user, username, allow_staff=False):
"""
Helper method that raises UserNotAuthorized if requesting user
is not owner user or is not staff if access to staff is given
(i.e. 'allow_staff' = true)
"""
if requesting_user.username != username:
if not requesting_user.is_staff or not allow_staff:
raise UserNotAuthorized()
def create_user_preference_serializer(user, preference_key, preference_value):
"""Creates a serializer for the specified user preference.
Arguments:
user (User): The user whose preference is being serialized.
preference_key (str): The key for the user preference.
preference_value (str): The value to be stored. Non-string values will be converted to strings.
Returns:
A serializer that can be used to save the user preference.
"""
try:
existing_user_preference = UserPreference.objects.get(user=user, key=preference_key)
except ObjectDoesNotExist:
existing_user_preference = None
new_data = {
"user": user.id,
"key": preference_key,
"value": preference_value,
}
if existing_user_preference:
serializer = RawUserPreferenceSerializer(existing_user_preference, data=new_data)
else:
serializer = RawUserPreferenceSerializer(data=new_data)
return serializer
def validate_user_preference_serializer(serializer, preference_key, preference_value):
"""Validates a user preference serializer.
Arguments:
serializer (UserPreferenceSerializer): The serializer to be validated.
preference_key (str): The key for the user preference.
preference_value (str): The value to be stored. Non-string values will be converted to strings.
Raises:
PreferenceValidationError: the supplied key and/or value for a user preference are invalid.
"""
if preference_value is None or unicode(preference_value).strip() == '':
format_string = ugettext_noop(u"Preference '{preference_key}' cannot be set to an empty value.")
raise PreferenceValidationError({
preference_key: {
"developer_message": format_string.format(preference_key=preference_key),
"user_message": _(format_string).format(preference_key=preference_key)
}
})
if not serializer.is_valid():
developer_message = u"Value '{preference_value}' not valid for preference '{preference_key}': {error}".format(
preference_key=preference_key, preference_value=preference_value, error=serializer.errors
)
if "key" in serializer.errors:
user_message = _(u"Invalid user preference key '{preference_key}'.").format(
preference_key=preference_key
)
else:
user_message = _(u"Value '{preference_value}' is not valid for user preference '{preference_key}'.").format(
preference_key=preference_key, preference_value=preference_value
)
raise PreferenceValidationError({
preference_key: {
"developer_message": developer_message,
"user_message": user_message,
}
})
def _create_preference_update_error(preference_key, preference_value, error):
""" Creates a PreferenceUpdateError with developer_message and user_message. """
return PreferenceUpdateError(
developer_message=u"Save failed for user preference '{key}' with value '{value}': {error}".format(
key=preference_key, value=preference_value, error=error
),
user_message=_(u"Save failed for user preference '{key}' with value '{value}'.").format(
key=preference_key, value=preference_value
),
)
| agpl-3.0 |
franchenstein/master_project | main.py | 1 | 18773 | #!/usr/bin
import probabilisticgraph as pg
import graphgenerator as gg
import dmarkov as dm
import sequenceanalyzer as sa
import yaml
import matplotlib.pyplot as plt
import synchwordfinder as swf
def main(config_file, fsw=False, terminate=False, dmark=False, generate=False, gen_seq=False, an_seq=False, plot=False,
seq_len=10000000, tag='default'):
with open(config_file, 'r') as f:
configs = yaml.load(f)
graph_path = configs['graph_path']
terminations = configs['terminations']
lmax = configs['lmax']
algorithms = configs['algorithms']
lrange = configs['lrange']
alpharange = configs['alpharange']
drange = configs['drange']
test = configs['test']
synch_words = configs['synch_words']
l2range = configs['l2range']
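    # Illustrative layout of the YAML config file read above (all values are
    # hypothetical; only these keys are required):
    #
    #   graph_path: ternary_even_shift
    #   terminations: [omega, omega_inverted]
    #   lmax: 10
    #   algorithms: [dmark, mk1, crissis]
    #   lrange: [4, 6]
    #   alpharange: [0.95]
    #   drange: [4, 6, 8]
    #   test: chi-squared
    #   synch_words: []
    #   l2range: [2]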
if fsw:
p = 'configs/' + graph_path + '/fsw_params.yaml'
with open(p, 'r') as f:
fsw_params = yaml.load(f)
find_synch_words(graph_path, fsw_params['w'], lmax, fsw_params['alpha'], fsw_params['test'], l2range)
if terminate:
terminate_graphs(graph_path, terminations, lrange, lmax, alpharange, test)
if dmark:
generate_dmarkov(graph_path, drange, lmax)
if generate:
seq_path = 'sequences/' + graph_path + '/original_length_' + str(seq_len) + '.yaml'
generate_graphs(algorithms, terminations, lmax, lrange, l2range, alpharange, graph_path, synch_words, test,
seq_path)
if gen_seq:
generate_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len)
if an_seq:
p = 'configs/' + graph_path + '/params.yaml'
with open(p, 'r') as f:
params = yaml.load(f)
analyze_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len,
params['to_analyze'], params['other_params'])
if plot:
p = 'configs/' + graph_path + '/plotconfigs.yaml'
with open(p, 'r') as f:
params = yaml.load(f)
if params['cond_entropy']:
plot_entropies(graph_path, algorithms, terminations, drange, lrange, alpharange, params['eval_l'], tag)
if params['autocorrelation']:
plot_autocorr(graph_path, algorithms, terminations, drange, lrange, alpharange, params['upto'], tag)
if params['kld']:
plot_others('kld', graph_path, algorithms, terminations, drange, lrange, alpharange, tag)
if params['l1metric']:
plot_others('l1metric', graph_path, algorithms, terminations, drange, lrange, alpharange, tag)
def find_synch_words(graph_path, w, l, alpha, test, l2range=[1]):
s = swf.SynchWordFinder(graph_path, w, l, alpha, test, l2range)
sw = s.find_synch_words()
path = "synch_words/" + graph_path + "/sw.yaml"
with open(path, "w") as f:
yaml.dump(sw, f)
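# Expand the last level of the base rooted tree with probabilities (rtp_L<lmax>)
# for every (termination, L, alpha) combination and save each expanded graph.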
def terminate_graphs(graph_path, terminations, lrange, lmax, alpharange, test):
g = pg.ProbabilisticGraph([], [])
if 'omega_inverted' in terminations:
synch_path = 'synch_words/' + graph_path + '/sw.yaml'
with open(synch_path, 'r') as f:
synch_words = yaml.load(f)
else:
synch_words = []
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'graphs/' + graph_path + '/rtp_L' + str(lmax) + '.yaml'
g.open_graph_file(p)
h = g.expand_last_level(l, t, alpha, test, synch_words)
path = 'graphs/' + graph_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml'
h.save_graph_file(path)
def generate_graphs(algorithms, terminations, maxl, lrange, l2range, alpharange, save_path, synch_words, test,
seq_path):
for t in terminations:
for l in lrange:
for alpha in alpharange:
p1 = 'graphs/' + save_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml'
p2 = 'graphs/' + save_path + '/L' + str(l) + '_alpha' + str(alpha) + '_' + t
g = gg.GraphGenerator(p1, synch_words, p2, seq_path)
for algo in algorithms:
if algo == 'mk1':
g.mk1(test, alpha, l2range[-1])
elif algo == 'mk2':
g.mk2()
elif algo == 'mk2_moore':
g.mk2_moore(test, alpha, l2range[-1])
elif algo == 'mk3':
g.mk3(test, alpha)
if 'crissis' in algorithms:
p1 = 'graphs/' + save_path + '/rtp_L' + str(maxl) + '.yaml'
for l2 in l2range:
for alpha in alpharange:
p2 = 'graphs/' + save_path + '/L_2_' + str(l2) + '_alpha' + str(alpha)
g = gg.GraphGenerator(p1, synch_words, p2, seq_path)
g.crissis(test, alpha, l2)
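# Build a D-Markov machine from the base rooted tree for each D in drange and
# save it alongside the other graphs.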
def generate_dmarkov(graph_path, drange, lmax):
g = pg.ProbabilisticGraph([], [])
for d in drange:
p = 'graphs/' + graph_path + '/rtp_L' + str(lmax) + '.yaml'
g.open_graph_file(p)
h = dm.DMarkov(g, d)
path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
h.save_graph_file(path)
def generate_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len):
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
for d in drange:
p = 'dmarkov_d' + str(d) + '.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
elif algo == 'crissis':
for l2 in l2range:
for alpha in alpharange:
p = 'L_2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
else:
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
def generate_sequences_core(g, graph_path, path, p, seq_len):
g.open_graph_file(path)
seq, v = g.generate_sequence(seq_len, g.states[0])
p = 'sequences/' + graph_path + '/len_' + str(seq_len) + '_' + p
with open(p, 'w') as f:
yaml.dump(seq, f)
def analyze_sequences(graph_path, algorithms, drange, terminations,
lrange, l2range, alpharange, seq_len, to_analyze, params):
for algo in algorithms:
if algo == 'dmark':
kld = []
l1 = []
for d in drange:
p = 'dmarkov_d' + str(d) + '.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/dmarkov.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/dmarkov.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
elif algo == 'crissis':
kld = []
l1 = []
for l2 in l2range:
for alpha in alpharange:
p = 'L_2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/crissis.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/crissis.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
else:
for t in terminations:
kld = []
l1 = []
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/' + t + '_' + algo + '.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/' + t + '_' + algo + '.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
def analyze_sequences_core_1(graph_path, path, to_analyze, params, seq_an):
kld = 0
l1 = 0
if to_analyze['probabilities']:
p, alph = seq_an.calc_probs(params['L'])
p_path = 'results/'+ graph_path + '/probabilities/' + path
with open(p_path, 'w') as f:
yaml.dump([p, alph], f)
if to_analyze['cond_probabilities']:
check_probs(seq_an, graph_path, path)
p_cond = seq_an.calc_cond_probs(params['L']-1)
p_cond_path = 'results/'+ graph_path + '/probabilities/cond_' + path
with open(p_cond_path, 'w') as f:
yaml.dump(p_cond, f)
if to_analyze['cond_entropy']:
check_probs(seq_an, graph_path, path)
check_cond_probs(seq_an, graph_path, path)
h = seq_an.calc_cond_entropy(params['L']-1)
h_path = 'results/'+ graph_path + '/cond_entropies/' + path
with open(h_path, 'w') as f:
yaml.dump(h, f)
if to_analyze['autocorrelation']:
a = seq_an.calc_autocorrelation(params['upto'])
a_path = 'results/' + graph_path + '/autocorrelations/' + path
with open(a_path, 'w') as f:
yaml.dump(a, f)
if to_analyze['kld']:
check_probs(seq_an, graph_path, path)
p = load_reference_probs(graph_path)
kld = seq_an.calc_kldivergence(p, params['K'])
if to_analyze['l1metric']:
check_probs(seq_an, graph_path, path)
p = load_reference_probs(graph_path)
l1 = seq_an.calc_l1metric(p, params['l1'])
return [kld, l1]
def check_probs(seq_an, graph_path, path):
if not seq_an.probabilities:
p_path = 'results/'+ graph_path + '/probabilities/' + path
with open(p_path, 'r') as f:
p, alph = yaml.load(f)
seq_an.probabilities = p
seq_an.alphabet = alph
def check_cond_probs(seq_an, graph_path, path):
if not seq_an.conditional_probabilities:
p_path = 'results/'+ graph_path + '/probabilities/cond_' + path
with open(p_path, 'r') as f:
pcond = yaml.load(f)
seq_an.conditional_probabilities = pcond
def load_reference_probs(graph_path):
path = 'results/' + graph_path + '/probabilities/original.yaml'
with open(path, 'r') as f:
p = yaml.load(f)
return p[0]
def plot_entropies(graph_path, algorithms, terminations, drange, lrange, alpharange, eval_l, tag):
path_original = 'results/' + graph_path + '/cond_entropies/original.yaml'
with open(path_original, 'r') as f:
h_original = yaml.load(f)
h_base = h_original[eval_l]
h = []
states = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
h_dmark = []
states_dmark = []
for d in drange:
h_path = 'results/' + graph_path + '/cond_entropies/dmarkov_d' + str(d) + '.yaml'
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h_dmark.append(h_eval[eval_l])
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
states_dmark.append(len(g.states))
h.append(h_dmark)
states.append(states_dmark)
lbl = 'D-Markov, D from ' + str(drange[0]) + ' to ' + str(drange[-1])
labels.append(lbl)
else:
for t in terminations:
h_term = []
states_term = []
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
h_path = 'results/' + graph_path + '/cond_entropies/' + p
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h_term.append(h_eval[eval_l])
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
states_term.append(len(g.states))
lbl = algo + ', ' + t
labels.append(lbl)
h.append(h_term)
states.append(states_term)
i = 0
for entropy in h:
plt.semilogx(states[i], entropy, marker='o', label = labels[i])
i += 1
plt.axhline(y=h_base, color='k', linewidth = 3, label='Original sequence baseline')
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Number of states')
plt.ylabel('Conditional Entropy')
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
save_path = 'plots/' + graph_path + '/cond_entropies_' + tag + '.png'
plt.savefig(save_path, bbox_inches='tight')
plt.show()
def plot_others(kind, graph_path, algorithms, terminations, drange, lrange, alpharange, tag):
h = []
states = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
states_dmark = []
h_path = 'results/' + graph_path + '/' + kind + '/dmarkov.yaml'
with open(h_path, 'r') as f:
h.append(yaml.load(f))
for d in drange:
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
states_dmark.append(len(g.states))
states.append(states_dmark)
lbl = 'D-Markov, D from ' + str(drange[0]) + ' to ' + str(drange[-1])
labels.append(lbl)
else:
for t in terminations:
states_term = []
h_path = 'results/' + graph_path + '/' + kind + '/' + t + '_' + algo + '.yaml'
with open(h_path, 'r') as f:
h.append(yaml.load(f))
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
states_term.append(len(g.states))
lbl = algo + ', ' + t
labels.append(lbl)
states.append(states_term)
i = 0
for value in h:
print len(value)
print len(states[i])
plt.semilogx(states[i], value, marker='o', label=labels[i])
i += 1
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Number of states')
if kind == 'l1metric':
plt.ylabel('L1-Metric')
elif kind == 'kld':
plt.ylabel('Kullback-Leibler Divergence')
save_path = 'plots/' + graph_path + '/' + kind + '_' + tag + '.png'
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
plt.savefig(save_path, bbox_inches='tight')
plt.show()
def plot_autocorr(graph_path, algorithms, terminations, drange, lrange, alpharange, up_to, tag):
path_original = 'results/' + graph_path + '/autocorrelations/original.yaml'
with open(path_original, 'r') as f:
h_base = yaml.load(f)
h = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
for d in drange:
h_path = 'results/' + graph_path + '/autocorrelations/dmarkov_d' + str(d) + '.yaml'
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h.append(h_eval)
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
lbl = 'D-Markov, D = ' + str(d) + ', ' + str(len(g.states)) + ' states'
labels.append(lbl)
else:
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
h_path = 'results/' + graph_path + '/autocorrelations/' + p
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h.append(h_eval)
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
lbl = algo + ', ' + t + ', L = ' +str(l) + '. ' + str(len(g.states)) + ' states'
labels.append(lbl)
i = 0
x = range(1, up_to)
for autocorr in h:
plt.plot(x, autocorr[1:up_to], marker='o', label=labels[i])
i += 1
plt.plot(x, h_base[1:up_to], color='k', linewidth=3, label='Original sequence')
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Lag')
plt.ylabel('Autocorrelation')
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
save_path = 'plots/' + graph_path + '/autocorrelations_' + tag + '.png'
plt.savefig(save_path, bbox_inches='tight')
plt.show()
| mit |
zaxliu/deepnap | experiments/kdd-exps/experiment_message_2016-6-11_BUF2_G5_FR100_legacy.py | 1 | 4371 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = 'message_2016-6-11_BUF2_G5_FR100.log'
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.5, 0.9
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = 0.5
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime('2014-11-05 09:20:00')
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
jemsbhai/otp | lib/asn1/test/asn1_SUITE_data/SwCDR.py | 97 | 5470 | SwCDR DEFINITIONS
IMPLICIT TAGS ::=
BEGIN
EXPORTS
SwCDR;
SwCDR ::= CHOICE
{
origSvcCallRecord [0] OrigSvcCallRecord,
termSvcCallRecord [1] TermSvcCallRecord
}
--OrigSvcCallRecord ::= SET
OrigSvcCallRecord ::= SEQUENCE
{
callCorrelationId [0] INTEGER ,
chargingIndicator [1] ChargingIndicator,
sequenceNo [2] INTEGER ,
callingParty [3] CallingNumber,
calledParty [4] CalledNumber,
connectedNumber [5] ConnectedNumber,
startDate [6] StartDate,
startTime [7] StartTime,
duration [8] CallDuration ,
-- bearerClass [] BearerClass ,
trafficType [9] TrafficType ,
qosFwd [10] QoSClass ,
qosBkwd [11] QoSClass ,
forwardPcrClp0 [12] CellRate ,
forwardPcrClp01 [13] CellRate ,
backwardPcrClp0 [14] CellRate ,
backwardPcrClp01 [15] CellRate ,
forwardScrClp0 [16] CellRate ,
forwardScrClp01 [17] CellRate ,
backwardScrClp0 [18] CellRate ,
backwardScrClp01 [19] CellRate ,
forwardMcrClp0 [20] CellRate ,
forwardMcrClp01 [21] CellRate ,
backwardMcrClp0 [22] CellRate ,
backwardMcrClp01 [23] CellRate ,
forwardMbsClp0 [24] CellRate ,
forwardMbsClp01 [25] CellRate ,
forwardBEI [26] INTEGER ,
backwardBEI [27] INTEGER ,
forwardTagging [28] INTEGER ,
backwardTagging [29] INTEGER ,
-- egressCellrate0 [] INTEGER,
-- egressCellrate01 [] INTEGER,
ingressCellrate0 [30] INTEGER ,
-- ingressCellrate01 [] INTEGER ,
ingressCellrate1 [31] INTEGER ,
connectionConfig [32] UserPlaneConnection OPTIONAL
-- causeForTerm [33] CauseForTerm OPTIONAL
}
--TermSvcCallRecord ::= SET
TermSvcCallRecord ::= SEQUENCE
{
callCorrelationId [0] INTEGER ,
chargingIndicator [1] ChargingIndicator,
sequenceNo [2] INTEGER ,
callingParty [3] CallingNumber,
calledParty [4] CalledNumber,
connectedNumber [5] ConnectedNumber,
startDate [6] StartDate,
startTime [7] StartTime,
duration [8] CallDuration ,
-- bearerClass [] BearerClass ,
trafficType [9] TrafficType ,
qosFwd [10] QoSClass ,
qosBkwd [11] QoSClass ,
forwardPcrClp0 [12] CellRate ,
forwardPcrClp01 [13] CellRate ,
backwardPcrClp0 [14] CellRate ,
backwardPcrClp01 [15] CellRate ,
forwardScrClp0 [16] CellRate ,
forwardScrClp01 [17] CellRate ,
backwardScrClp0 [18] CellRate ,
backwardScrClp01 [19] CellRate ,
forwardMcrClp0 [20] CellRate ,
forwardMcrClp01 [21] CellRate ,
backwardMcrClp0 [22] CellRate ,
backwardMcrClp01 [23] CellRate ,
forwardMbsClp0 [24] CellRate ,
forwardMbsClp01 [25] CellRate ,
forwardBEI [26] INTEGER ,
backwardBEI [27] INTEGER ,
forwardTagging [28] INTEGER ,
backwardTagging [29] INTEGER ,
-- egressCellrate0 [] INTEGER ,
-- egressCellrate01 [] INTEGER ,
ingressCellrate0 [30] INTEGER ,
-- ingressCellrate01 [] INTEGER ,
ingressCellrate1 [31] INTEGER ,
connectionConfig [32] UserPlaneConnection OPTIONAL
-- causeForTerm [33] CauseForTerm OPTIONAL
}
ChargingIndicator ::= INTEGER
{
origCallRecord (0),
termCallRecord (1)
}
CallingNumber ::= OCTET STRING (SIZE (12))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
CalledNumber ::= OCTET STRING (SIZE (20))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation,
-- PresentationIndicator, ScreeningIndicator
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
ConnectedNumber ::= OCTET STRING (SIZE (12))
-- BCD encoded representation of the number.
-- Contains: TypeOfNumber, NumberingPlanInformation,
-- PresentationIndicator, ScreeningIndicator
-- and either an E.164 number or a NSAP style of number,
-- including a possible subaddress.
QoSClass ::= INTEGER
-- Explicit values ToBeDefined,
-- until then: value received in SETUP-msg
--BearerClass ::= INTEGER
--{
-- bcobA (0),
-- bcobC (1),
-- bcobX (2)
--}
TrafficType ::= INTEGER
{
noIndication (0),
abr (1),
cbr (2),
vbr (3),
vbrrt (4),
vbrnrt (5),
ubr (6)
}
--TimingRequirements ::= INTEGER
--{
-- noIndication (0),
-- endToEndRequired (1),
-- endToEndNotRequired (2)
--}
--ClippingSusceptibility ::= INTEGER
--{
-- notSusceptible (0),
-- susceptible (1)
--}
UserPlaneConnection ::= INTEGER
{
pointToPoint (0),
pointToMultipoint (1)
}
--AALParameters ::= INTEGER AAL Type only
--{
-- userDefined (0),
-- aal1 (1),
-- aal2 (2),
-- aal34 (3),
-- aal5 (5)
--}
CellRate ::= INTEGER
-- Value range not less than 2^24.
-- BurstSize ::= ToBeDefined
-- TaggingRequest ::= ToBeDefined
--Timestamp ::= OCTET STRING (SIZE (11))
-- The contents of this field is a compact form of
-- the UTCTime format, containing local time plus
-- an offset to universal time.
-- The compact format is YYMMDDhhmmssdddShhmm, where:
-- YY = year, 00-99, BCD encoded
-- MM = month, 01-12, BCD encoded
-- DD = day, 01-31, BCD encoded
-- hh = hour, 00-23, BCD encoded
-- mm = minute, 00-59, BCD encoded
-- ss = second, 00-59, BCD encoded
-- ddd = millisecond, 000-999, BCD encoded
-- and rightjustified as "0ddd"
-- S = sign, "+"/"-", ASCII encoded
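-- Illustrative example (not normative): local time 1996-12-31 23:59:59.123
-- with a +01:00 offset to universal time would read
-- "9612312359590123+0100" in this layout before BCD packing.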
StartDate ::= OCTET STRING (SIZE (8))
StartTime ::= OCTET STRING (SIZE (6))
CallDuration ::= INTEGER
-- Expressed as number of milliseconds
Cellrate ::= INTEGER
-- Value range 0-2^64
CauseForTerm ::= INTEGER
{
unsuccessfulCallAttempt (0),
abnormalTermination (1)
}
END
| apache-2.0 |
xiejianying/pjsip_trunk | pjsip-apps/src/pygui/call.py | 26 | 3368 | # $Id$
#
# pjsua Python GUI Demo
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
if sys.version_info[0] >= 3: # Python 3
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as msgbox
else:
import Tkinter as tk
import tkMessageBox as msgbox
import ttk
import random
import pjsua2 as pj
import application
import endpoint as ep
# Call class
class Call(pj.Call):
"""
High level Python Call object, derived from pjsua2's Call object.
"""
def __init__(self, acc, peer_uri='', chat=None, call_id = pj.PJSUA_INVALID_ID):
pj.Call.__init__(self, acc, call_id)
self.acc = acc
self.peerUri = peer_uri
self.chat = chat
self.connected = False
self.onhold = False
def onCallState(self, prm):
ci = self.getInfo()
self.connected = ci.state == pj.PJSIP_INV_STATE_CONFIRMED
if self.chat:
self.chat.updateCallState(self, ci)
def onCallMediaState(self, prm):
ci = self.getInfo()
for mi in ci.media:
if mi.type == pj.PJMEDIA_TYPE_AUDIO and \
(mi.status == pj.PJSUA_CALL_MEDIA_ACTIVE or \
mi.status == pj.PJSUA_CALL_MEDIA_REMOTE_HOLD):
m = self.getMedia(mi.index)
am = pj.AudioMedia.typecastFromMedia(m)
# connect ports
ep.Endpoint.instance.audDevManager().getCaptureDevMedia().startTransmit(am)
am.startTransmit(ep.Endpoint.instance.audDevManager().getPlaybackDevMedia())
if mi.status == pj.PJSUA_CALL_MEDIA_REMOTE_HOLD and not self.onhold:
self.chat.addMessage(None, "'%s' sets call onhold" % (self.peerUri))
self.onhold = True
elif mi.status == pj.PJSUA_CALL_MEDIA_ACTIVE and self.onhold:
self.chat.addMessage(None, "'%s' sets call active" % (self.peerUri))
self.onhold = False
if self.chat:
self.chat.updateCallMediaState(self, ci)
def onInstantMessage(self, prm):
# chat instance should have been initialized
if not self.chat: return
self.chat.addMessage(self.peerUri, prm.msgBody)
self.chat.showWindow()
def onInstantMessageStatus(self, prm):
if prm.code/100 == 2: return
# chat instance should have been initialized
if not self.chat: return
self.chat.addMessage(None, "Failed sending message to '%s' (%d): %s" % (self.peerUri, prm.code, prm.reason))
def onTypingIndication(self, prm):
# chat instance should have been initialized
if not self.chat: return
self.chat.setTypingIndication(self.peerUri, prm.isTyping)
def onDtmfDigit(self, prm):
#msgbox.showinfo("pygui", 'Got DTMF:' + prm.digit)
pass
def onCallMediaTransportState(self, prm):
#msgbox.showinfo("pygui", "Media transport state")
pass
if __name__ == '__main__':
application.main()
| gpl-2.0 |
huntxu/fuel-web | nailgun/nailgun/test/unit/test_db_migrations.py | 4 | 1070 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import alembic
from nailgun.db import dropdb
from nailgun.db.migration import ALEMBIC_CONFIG
from nailgun.test import base
class TestDbMigrations(base.BaseTestCase):
def test_clean_downgrade(self):
# We don't have data migration for clusters with vip_type 'ovs'
# so checking migration only for clean DB
dropdb()
alembic.command.upgrade(ALEMBIC_CONFIG, 'head')
alembic.command.downgrade(ALEMBIC_CONFIG, 'base')
| apache-2.0 |
JRock007/boxxy | dist/Boxxy.app/Contents/Resources/lib/python2.7/numpy/lib/stride_tricks.py | 35 | 4228 | """
Utilities that manipulate strides to achieve desirable effects.
An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['broadcast_arrays']
class DummyArray(object):
"""Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
def as_strided(x, shape=None, strides=None):
""" Make an ndarray from the given array with the given shape and strides.
"""
interface = dict(x.__array_interface__)
if shape is not None:
interface['shape'] = tuple(shape)
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
# Make sure dtype is correct in case of custom dtype
if array.dtype.kind == 'V':
array.dtype = x.dtype
return array
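# Illustrative sketch (not part of the public API): as_strided can create
# overlapping views, e.g. a sliding window over a 1-D array.
#
#     >>> a = np.arange(6)
#     >>> as_strided(a, shape=(4, 3), strides=(a.itemsize, a.itemsize))
#     array([[0, 1, 2],
#            [1, 2, 3],
#            [2, 3, 4],
#            [3, 4, 5]])
#
# The rows share memory with `a`, so writing through the view mutates the
# original array, and no bounds checking is performed -- an inconsistent
# shape/strides pair can expose arbitrary memory.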
def broadcast_arrays(*args):
"""
Broadcast any number of arrays against each other.
Parameters
----------
`*args` : array_likes
The arrays to broadcast.
Returns
-------
broadcasted : list of arrays
These arrays are views on the original arrays. They are typically
not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location. If you
need to write to the arrays, make copies first.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> y = np.array([[1],[2],[3]])
>>> np.broadcast_arrays(x, y)
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])]
Here is a useful idiom for getting contiguous copies instead of
non-contiguous views.
>>> [np.array(a) for a in np.broadcast_arrays(x, y)]
[array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]), array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])]
"""
args = [np.asarray(_m) for _m in args]
shapes = [x.shape for x in args]
if len(set(shapes)) == 1:
# Common case where nothing needs to be broadcasted.
return args
shapes = [list(s) for s in shapes]
strides = [list(x.strides) for x in args]
nds = [len(s) for s in shapes]
biggest = max(nds)
# Go through each array and prepend dimensions of length 1 to each of
# the shapes in order to make the number of dimensions equal.
for i in range(len(args)):
diff = biggest - nds[i]
if diff > 0:
shapes[i] = [1] * diff + shapes[i]
strides[i] = [0] * diff + strides[i]
# Check each dimension for compatibility. A dimension length of 1 is
# accepted as compatible with any other length.
common_shape = []
for axis in range(biggest):
lengths = [s[axis] for s in shapes]
unique = set(lengths + [1])
if len(unique) > 2:
# There must be at least two non-1 lengths for this axis.
raise ValueError("shape mismatch: two or more arrays have "
"incompatible dimensions on axis %r." % (axis,))
elif len(unique) == 2:
# There is exactly one non-1 length. The common shape will take
# this value.
unique.remove(1)
new_length = unique.pop()
common_shape.append(new_length)
# For each array, if this axis is being broadcasted from a
# length of 1, then set its stride to 0 so that it repeats its
# data.
for i in range(len(args)):
if shapes[i][axis] == 1:
shapes[i][axis] = new_length
strides[i][axis] = 0
else:
# Every array has a length of 1 on this axis. Strides can be
# left alone as nothing is broadcasted.
common_shape.append(1)
# Construct the new arrays.
broadcasted = [as_strided(x, shape=sh, strides=st) for (x, sh, st) in
zip(args, shapes, strides)]
return broadcasted
| mit |
DirtyUnicorns/android_external_chromium_org | tools/telemetry/telemetry/core/possible_browser.py | 25 | 2112 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class PossibleBrowser(object):
"""A browser that can be controlled.
Call Create() to launch the browser and begin manipulating it..
"""
def __init__(self, browser_type, target_os, finder_options,
supports_tab_control):
self._browser_type = browser_type
self._target_os = target_os
self._finder_options = finder_options
self._supports_tab_control = supports_tab_control
self._platform = None
self._platform_backend = None
self._archive_path = None
self._append_to_existing_wpr = False
self._make_javascript_deterministic = True
self._credentials_path = None
def __repr__(self):
return 'PossibleBrowser(browser_type=%s)' % self.browser_type
@property
def browser_type(self):
return self._browser_type
@property
def target_os(self):
"""Target OS, the browser will run on."""
return self._target_os
@property
def finder_options(self):
return self._finder_options
@property
def supports_tab_control(self):
return self._supports_tab_control
@property
def platform(self):
self._InitPlatformIfNeeded()
return self._platform
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self):
raise NotImplementedError()
def SupportsOptions(self, finder_options):
"""Tests for extension support."""
raise NotImplementedError()
def IsRemote(self):
return False
def RunRemote(self):
pass
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
return -1
def SetReplayArchivePath(self, archive_path, append_to_existing_wpr,
make_javascript_deterministic):
self._archive_path = archive_path
self._append_to_existing_wpr = append_to_existing_wpr
self._make_javascript_deterministic = make_javascript_deterministic
def SetCredentialsPath(self, credentials_path):
self._credentials_path = credentials_path
| bsd-3-clause |
aospx-kitkat/platform_external_chromium_org | chrome/test/pyautolib/bookmark_model.py | 80 | 3206 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""BookmarkModel: python representation of the bookmark model.
Obtain one of these from PyUITestSuite::GetBookmarkModel() call.
"""
import os
import simplejson as json
import sys
class BookmarkModel(object):
def __init__(self, json_string):
"""Initialize a BookmarkModel from a string of json.
The JSON representation is the same as used by the bookmark model
to save to disk.
Args:
json_string: a string of JSON.
"""
self.bookdict = json.loads(json_string)
def BookmarkBar(self):
"""Return the bookmark bar node as a dict."""
return self.bookdict['roots']['bookmark_bar']
def Other(self):
"""Return the 'other' node (e.g. parent of "Other Bookmarks")"""
return self.bookdict['roots']['other']
def NodeCount(self, node=None):
"""Return a count of bookmark nodes, including folders.
The root node itself is included in the count.
Args:
node: the root to start with. If not specified, count all."""
if node == None:
return reduce(lambda x, y: x + y,
[self.NodeCount(x)
for x in self.bookdict['roots'].values()])
total = 1
children = node.get('children', None)
if children:
total = total + reduce(lambda x,y: x + y,
[self.NodeCount(x) for x in children])
return total
def FindByID(self, id, nodes=None):
"""Find the bookmark by id. Return the dict or None.
Args:
id: the id to look for.
nodes: an iterable of nodes to start with. If not specified, search all.
'Not specified' means None, not [].
"""
# Careful; we may get an empty list which is different than not
# having specified a list.
if nodes == None:
nodes = self.bookdict['roots'].values()
# Check each item. If it matches, return. If not, check each of
# their kids.
for node in nodes:
if node['id'] == id:
return node
for child in node.get('children', []):
found_node = self.FindByID(id, [child])
if found_node:
return found_node
# Not found at all.
return None
def FindByTitle(self, title, nodes=None):
"""Return a tuple of all nodes which have |title| in their title.
Args:
title: the title to look for.
nodes: an iterable of nodes to start with. If not specified, search all.
'Not specified' means None, not [].
"""
# Careful; we may get an empty list which is different than not
# having specified a list.
if nodes == None:
nodes = self.bookdict['roots'].values()
# Check each item. If it matches, return. If not, check each of
# their kids.
results = []
for node in nodes:
node_title = node.get('title', None) or node.get('name', None)
if title == node_title:
results.append(node)
# Note we check everything; unlike the FindByID, we do not stop early.
for child in node.get('children', []):
results += self.FindByTitle(title, [child])
return results
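# Illustrative usage with a hypothetical JSON string shaped like the on-disk
# bookmark file (only the fields this class actually reads are shown):
#
#   model = BookmarkModel(
#       '{"roots": {"bookmark_bar": {"id": "1", "name": "Bar", "children":'
#       ' [{"id": "2", "name": "Example"}]}, "other": {"id": "3", "name": "Other"}}}')
#   model.NodeCount()             # 3: both roots plus the single child
#   model.FindByID("2")           # the "Example" node dict
#   model.FindByTitle("Example")  # [that same node dict]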
| bsd-3-clause |
haoyangw/android_kernel_xiaomi_dior-1 | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
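# A typical invocation (illustrative; the script expects to be run from a
# directory two levels below the kernel tree root, e.g. Documentation/target/):
#
#   ./tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This generates drivers/target/tcm_nab5000/ with tcm_nab5000_base.h,
# tcm_nab5000_fabric.c/.h, tcm_nab5000_configfs.c, a Kconfig and a Makefile,
# and optionally appends the new module to drivers/target/Kconfig and Makefile.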
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
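# Illustrative invocation (hypothetical module name; the script is meant to be
# run from inside a kernel tree so that the relative tcm_dir above resolves):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This generates drivers/target/tcm_nab5000/ with its Kconfig, Makefile and the
# fabric/configfs skeleton sources, then optionally appends the new module to
# drivers/target/Makefile and drivers/target/Kconfig as prompted above.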
| gpl-2.0 |
icebreaker/dotfiles | gnome/gnome2/gedit/plugins.symlink/snapopen/__init__.py | 1 | 10248 | # VERSION 1.1.5
# Updated by Alexandre da Silva for GMate Project (http://blog.siverti.com.br/gmate)
import gedit, gtk, gtk.glade
import gconf
import gnomevfs
import pygtk
pygtk.require('2.0')
import os, os.path, gobject
# set this to true for gedit versions before 2.16
pre216_version = False
max_result = 50
ui_str="""<ui>
<menubar name="MenuBar">
<menu name="SearchMenu" action="Search">
<placeholder name="SearchOps_7">
<menuitem name="GoToFile" action="GoToFileAction"/>
</placeholder>
</menu>
</menubar>
</ui>
"""
# essential interface
class SnapOpenPluginInstance:
def __init__(self, plugin, window):
self._window = window
self._plugin = plugin
if pre216_version:
self._encoding = gedit.gedit_encoding_get_current()
else:
self._encoding = gedit.encoding_get_current()
self._rootdir = "file://" + os.getcwd()
self._show_hidden = False
        self._liststore = None
self._init_glade()
self._insert_menu()
def deactivate(self):
self._remove_menu()
self._action_group = None
self._window = None
self._plugin = None
        self._liststore = None
def update_ui(self):
return
# MENU STUFF
def _insert_menu(self):
manager = self._window.get_ui_manager()
actions = [
('GoToFileAction', gtk.STOCK_JUMP_TO, _('Go to File...'), '<Ctrl><Alt>O', _("Go to a file with regex search"), self.on_snapopen_action)
]
self._action_group = gtk.ActionGroup("SnapOpenPluginActions")
self._action_group.add_actions(actions, self._window)
manager.insert_action_group(self._action_group, -1)
manager.add_ui_from_string(ui_str)
self._ui_id = manager.new_merge_id()
def _remove_menu(self):
manager = self._window.get_ui_manager()
manager.remove_ui(self._ui_id)
manager.remove_action_group(self._action_group)
# UI DIALOGUES
def _init_glade(self):
self._snapopen_glade = gtk.glade.XML(os.path.dirname(__file__) + "/snapopen.glade")
#setup window
self._snapopen_window = self._snapopen_glade.get_widget("SnapOpenWindow")
self._snapopen_window.connect("key-release-event", self.on_window_key)
self._snapopen_window.set_transient_for(self._window)
#setup buttons
self._snapopen_glade.get_widget("ok_button").connect("clicked", self.open_selected_item)
self._snapopen_glade.get_widget("cancel_button").connect("clicked", lambda a: self._snapopen_window.hide())
#setup entry field
self._glade_entry_name = self._snapopen_glade.get_widget("entry_name")
self._glade_entry_name.connect("key-release-event", self.on_pattern_entry)
#setup list field
self._hit_list = self._snapopen_glade.get_widget("hit_list")
self._hit_list.connect("select-cursor-row", self.on_select_from_list)
self._hit_list.connect("button_press_event", self.on_list_mouse)
self._liststore = gtk.ListStore(str, str, str)
self._hit_list.set_model(self._liststore)
column = gtk.TreeViewColumn("Name" , gtk.CellRendererText(), markup=0)
column.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
column2 = gtk.TreeViewColumn("File", gtk.CellRendererText(), markup=1)
column2.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self._hit_list.append_column(column)
self._hit_list.append_column(column2)
self._hit_list.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
#mouse event on list
def on_list_mouse(self, widget, event):
if event.type == gtk.gdk._2BUTTON_PRESS:
self.open_selected_item(event)
#key selects from list (passthrough 3 args)
def on_select_from_list(self, widget, event):
self.open_selected_item(event)
#keyboard event on entry field
def on_pattern_entry(self, widget, event):
oldtitle = self._snapopen_window.get_title().replace(" * too many hits", "")
if event.keyval == gtk.keysyms.Return:
self.open_selected_item(event)
return
pattern = self._glade_entry_name.get_text()
pattern = pattern.replace(" ","*")
#modify lines below as needed, these defaults work pretty well
rawpath = self._rootdir.replace("file://", "")
filefilter = " | grep -s -v \"/\.\""
cmd = ""
if self._show_hidden:
filefilter = ""
if len(pattern) > 0:
cmd = "cd " + rawpath + "; find . -maxdepth 10 -depth -type f -iwholename \"*" + pattern + "*\" " + filefilter + " | grep -v \"~$\" | head -n " + repr(max_result + 1) + " | sort"
self._snapopen_window.set_title("Searching ... ")
else:
self._snapopen_window.set_title("Enter pattern ... ")
#print cmd
self._liststore.clear()
maxcount = 0
hits = os.popen(cmd).readlines()
for file in hits:
file = file.rstrip().replace("./", "") #remove cwd prefix
name = os.path.basename(file)
self._liststore.append([self.highlight_pattern(name, pattern), self.highlight_pattern(file, pattern), file])
if maxcount > max_result:
break
maxcount = maxcount + 1
if maxcount > max_result:
oldtitle = oldtitle + " * too many hits"
self._snapopen_window.set_title(oldtitle)
selected = []
self._hit_list.get_selection().selected_foreach(self.foreach, selected)
if len(selected) == 0:
iter = self._liststore.get_iter_first()
if iter != None:
self._hit_list.get_selection().select_iter(iter)
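    # For illustration: with a rootdir of, say, "file:///home/user/project" and
    # the typed pattern "snap open" (spaces are turned into "*"), the command
    # assembled above expands to roughly:
    #   cd /home/user/project; find . -maxdepth 10 -depth -type f \
    #     -iwholename "*snap*open*" | grep -s -v "/\." | grep -v "~$" \
    #     | head -n 51 | sort
    # i.e. hidden paths and backup files are filtered out and the result list
    # is capped at max_result + 1 hits before sorting.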
def highlight_pattern(self, path, pattern):
query_list = pattern.lower().split("*")
        last_position = 0
        for word in query_list:
            location = path.lower().find(word, last_position)
            if location > -1:
                last_position = (location + len(word) + 3)
a_path = list(path)
a_path.insert(location, "<b>")
a_path.insert(location + len(word) + 1, "</b>")
path = "".join(a_path)
return path
#on menuitem activation (incl. shortcut)
def on_snapopen_action(self, *args):
fbroot = self.get_filebrowser_root()
if fbroot != "" and fbroot is not None:
self._rootdir = fbroot
self._snapopen_window.set_title("Snap open (Filebrowser integration)")
else:
eddtroot = self.get_eddt_root()
if eddtroot != "" and eddtroot is not None:
self._rootdir = eddtroot
self._snapopen_window.set_title("Snap open (EDDT integration)")
else:
self._snapopen_window.set_title("Snap open (cwd): " + self._rootdir)
self._snapopen_window.show()
self._glade_entry_name.select_region(0,-1)
self._glade_entry_name.grab_focus()
#on any keyboard event in main window
def on_window_key(self, widget, event):
if event.keyval == gtk.keysyms.Escape:
self._snapopen_window.hide()
def foreach(self, model, path, iter, selected):
selected.append(model.get_value(iter, 2))
#open file in selection and hide window
def open_selected_item(self, event):
selected = []
self._hit_list.get_selection().selected_foreach(self.foreach, selected)
for selected_file in selected:
self._open_file (selected_file)
self._snapopen_window.hide()
#gedit < 2.16 version (get_tab_from_uri)
def old_get_tab_from_uri(self, window, uri):
docs = window.get_documents()
for doc in docs:
if doc.get_uri() == uri:
return gedit.tab_get_from_document(doc)
return None
#opens (or switches to) the given file
def _open_file(self, filename):
uri = self._rootdir + "/" + filename
if pre216_version:
tab = self.old_get_tab_from_uri(self._window, uri)
else:
tab = self._window.get_tab_from_uri(uri)
if tab == None:
tab = self._window.create_tab_from_uri(uri, self._encoding, 0, False, False)
self._window.set_active_tab(tab)
# EDDT integration
def get_eddt_root(self):
base = u'/apps/gedit-2/plugins/eddt'
client = gconf.client_get_default()
client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
path = os.path.join(base, u'repository')
val = client.get(path)
if val is not None:
return val.get_string()
# FILEBROWSER integration
def get_filebrowser_root(self):
base = u'/apps/gedit-2/plugins/filebrowser/on_load'
client = gconf.client_get_default()
client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
path = os.path.join(base, u'virtual_root')
val = client.get(path)
if val is not None:
#also read hidden files setting
base = u'/apps/gedit-2/plugins/filebrowser'
client = gconf.client_get_default()
client.add_dir(base, gconf.CLIENT_PRELOAD_NONE)
path = os.path.join(base, u'filter_mode')
try:
fbfilter = client.get(path).get_string()
except AttributeError:
fbfilter = "hidden"
if fbfilter.find("hidden") == -1:
self._show_hidden = True
else:
self._show_hidden = False
return val.get_string()
# STANDARD PLUMBING
class SnapOpenPlugin(gedit.Plugin):
DATA_TAG = "SnapOpenPluginInstance"
def __init__(self):
gedit.Plugin.__init__(self)
def _get_instance(self, window):
return window.get_data(self.DATA_TAG)
def _set_instance(self, window, instance):
window.set_data(self.DATA_TAG, instance)
def activate(self, window):
self._set_instance(window, SnapOpenPluginInstance(self, window))
def deactivate(self, window):
self._get_instance(window).deactivate()
self._set_instance(window, None)
def update_ui(self, window):
self._get_instance(window).update_ui()
| mit |
keerts/home-assistant | homeassistant/components/switch/dlink.py | 11 | 4665 | """
Support for D-link W215 smart switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.dlink/
"""
import logging
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
from homeassistant.const import TEMP_CELSIUS, STATE_UNKNOWN
REQUIREMENTS = ['https://github.com/LinuxChristian/pyW215/archive/'
'v0.4.zip#pyW215==0.4']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'D-link Smart Plug W215'
DEFAULT_PASSWORD = ''
DEFAULT_USERNAME = 'admin'
CONF_USE_LEGACY_PROTOCOL = 'use_legacy_protocol'
ATTR_CURRENT_CONSUMPTION = 'Current Consumption'
ATTR_TOTAL_CONSUMPTION = 'Total Consumption'
ATTR_TEMPERATURE = 'Temperature'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_USE_LEGACY_PROTOCOL, default=False): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
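# Illustrative configuration.yaml entry matching the schema above (host and
# password values are placeholders):
#
# switch:
#   - platform: dlink
#     host: 192.168.1.32
#     username: admin
#     password: YOUR_PASSWORD
#     use_legacy_protocol: false
#     name: D-link Smart Plug W215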
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup a D-Link Smart Plug."""
from pyW215.pyW215 import SmartPlug
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_legacy_protocol = config.get(CONF_USE_LEGACY_PROTOCOL)
name = config.get(CONF_NAME)
data = SmartPlugData(SmartPlug(host,
password,
username,
use_legacy_protocol))
add_devices([SmartPlugSwitch(hass, data, name)], True)
class SmartPlugSwitch(SwitchDevice):
"""Representation of a D-link Smart Plug switch."""
def __init__(self, hass, data, name):
"""Initialize the switch."""
self.units = hass.config.units
self.data = data
self._name = name
@property
def name(self):
"""Return the name of the Smart Plug, if any."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
try:
ui_temp = self.units.temperature(int(self.data.temperature),
TEMP_CELSIUS)
temperature = "%i %s" % \
(ui_temp, self.units.temperature_unit)
except (ValueError, TypeError):
temperature = STATE_UNKNOWN
try:
current_consumption = "%.2f W" % \
float(self.data.current_consumption)
except ValueError:
current_consumption = STATE_UNKNOWN
try:
total_consumption = "%.1f kWh" % \
float(self.data.total_consumption)
except ValueError:
total_consumption = STATE_UNKNOWN
attrs = {
ATTR_CURRENT_CONSUMPTION: current_consumption,
ATTR_TOTAL_CONSUMPTION: total_consumption,
ATTR_TEMPERATURE: temperature
}
return attrs
@property
def current_power_watt(self):
"""Return the current power usage in Watt."""
try:
return float(self.data.current_consumption)
except ValueError:
return None
@property
def is_on(self):
"""Return true if switch is on."""
return self.data.state == 'ON'
def turn_on(self, **kwargs):
"""Turn the switch on."""
self.data.smartplug.state = 'ON'
def turn_off(self):
"""Turn the switch off."""
self.data.smartplug.state = 'OFF'
def update(self):
"""Get the latest data from the smart plug and updates the states."""
self.data.update()
class SmartPlugData(object):
"""Get the latest data from smart plug."""
def __init__(self, smartplug):
"""Initialize the data object."""
self.smartplug = smartplug
self.state = None
self.temperature = None
self.current_consumption = None
self.total_consumption = None
def update(self):
"""Get the latest data from the smart plug."""
self.state = self.smartplug.state
self.temperature = self.smartplug.temperature
self.current_consumption = self.smartplug.current_consumption
self.total_consumption = self.smartplug.total_consumption
| apache-2.0 |
jbowes/yselect | test/mainmenutests.py | 1 | 2556 | # yselect - An RPM/Yum package handling frontend.
# Copyright (C) 2006 James Bowes <[email protected]>
# Copyright (C) 2006 Devan Goodwin <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import unittest
import settestpath
import mainmenu
class MainMenuModelTests(unittest.TestCase):
def setUp(self):
self.model = mainmenu.MainMenuModel("program name")
def testSelectBorderValues(self):
try:
self.model.select(-1)
self.fail()
except:
# Expected, do nothing.
pass
try:
self.model.select(len(self.model.entries))
self.fail()
except:
pass
try:
self.model.select(0)
except:
self.fail()
try:
self.model.select(len(self.model.entries) - 1)
except:
self.fail()
def testSignals(self):
observer = TestObserver()
self.model.add_observer("quit", observer)
self.model.add_observer("select", observer)
self.model.emit_signal("quit")
self.assertTrue(observer.been_notified)
self.assertEquals("quit", observer.notified_signal)
observer.reset()
self.model.emit_signal("select")
self.assertTrue(observer.been_notified)
self.assertEquals("select", observer.notified_signal)
class TestObserver:
def __init__(self):
self.been_notified = False
self.notified_signal = None
def notify(self, observable, signal_name):
self.been_notified = True
self.notified_signal = signal_name
def reset(self):
self.been_notified = False
self.notified_signal = None
def suite():
return unittest.makeSuite(MainMenuModelTests)
if __name__ == "__main__":
unittest.main(defaultTest="suite")
| gpl-2.0 |
bealdav/OCB | addons/account/__openerp__.py | 41 | 7694 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'eInvoicing',
'version' : '1.1',
'author' : 'OpenERP SA',
'category' : 'Accounting & Finance',
'description' : """
Accounting and Financial Management.
====================================
Financial and accounting module that covers:
--------------------------------------------
* General Accounting
* Cost/Analytic accounting
* Third party accounting
* Taxes management
* Budgets
* Customer and Supplier Invoices
* Bank statements
* Reconciliation process by partner
Creates a dashboard for accountants that includes:
--------------------------------------------------
* List of Customer Invoices to Approve
* Company Analysis
* Graph of Treasury
Processes like maintaining general ledgers are done through the defined Financial Journals (entry move line or grouping is maintained through a journal)
for a particular financial year and for preparation of vouchers there is a module named account_voucher.
""",
'website': 'https://www.odoo.com/page/billing',
'images' : ['images/accounts.jpeg','images/bank_statement.jpeg','images/cash_register.jpeg','images/chart_of_accounts.jpeg','images/customer_invoice.jpeg','images/journal_entries.jpeg'],
'depends' : ['base_setup', 'product', 'analytic', 'board', 'edi', 'report'],
'data': [
'security/account_security.xml',
'security/ir.model.access.csv',
'account_menuitem.xml',
'report/account_invoice_report_view.xml',
'report/account_entries_report_view.xml',
'report/account_treasury_report_view.xml',
'report/account_report_view.xml',
'report/account_analytic_entries_report_view.xml',
'wizard/account_move_bank_reconcile_view.xml',
'wizard/account_use_model_view.xml',
'account_installer.xml',
'wizard/account_period_close_view.xml',
'wizard/account_reconcile_view.xml',
'wizard/account_unreconcile_view.xml',
'wizard/account_statement_from_invoice_view.xml',
'account_view.xml',
'account_report.xml',
'account_financial_report_data.xml',
'wizard/account_report_common_view.xml',
'wizard/account_invoice_refund_view.xml',
'wizard/account_fiscalyear_close_state.xml',
'wizard/account_chart_view.xml',
'wizard/account_tax_chart_view.xml',
'wizard/account_move_line_reconcile_select_view.xml',
'wizard/account_open_closed_fiscalyear_view.xml',
'wizard/account_move_line_unreconcile_select_view.xml',
'wizard/account_vat_view.xml',
'wizard/account_report_print_journal_view.xml',
'wizard/account_report_general_journal_view.xml',
'wizard/account_report_central_journal_view.xml',
'wizard/account_subscription_generate_view.xml',
'wizard/account_fiscalyear_close_view.xml',
'wizard/account_state_open_view.xml',
'wizard/account_journal_select_view.xml',
'wizard/account_change_currency_view.xml',
'wizard/account_validate_move_view.xml',
'wizard/account_report_general_ledger_view.xml',
'wizard/account_invoice_state_view.xml',
'wizard/account_report_partner_balance_view.xml',
'wizard/account_report_account_balance_view.xml',
'wizard/account_report_aged_partner_balance_view.xml',
'wizard/account_report_partner_ledger_view.xml',
'wizard/account_reconcile_partner_process_view.xml',
'wizard/account_automatic_reconcile_view.xml',
'wizard/account_financial_report_view.xml',
'wizard/pos_box.xml',
'project/wizard/project_account_analytic_line_view.xml',
'account_end_fy.xml',
'account_invoice_view.xml',
'data/account_data.xml',
'data/data_account_type.xml',
'data/configurable_account_chart.xml',
'account_invoice_workflow.xml',
'project/project_view.xml',
'project/project_report.xml',
'project/wizard/account_analytic_balance_report_view.xml',
'project/wizard/account_analytic_cost_ledger_view.xml',
'project/wizard/account_analytic_inverted_balance_report.xml',
'project/wizard/account_analytic_journal_report_view.xml',
'project/wizard/account_analytic_cost_ledger_for_journal_report_view.xml',
'project/wizard/account_analytic_chart_view.xml',
'partner_view.xml',
'product_view.xml',
'account_assert_test.xml',
'ir_sequence_view.xml',
'company_view.xml',
'edi/invoice_action_data.xml',
'account_bank_view.xml',
'res_config_view.xml',
'account_pre_install.yml',
'views/report_vat.xml',
'views/report_invoice.xml',
'views/report_trialbalance.xml',
'views/report_centraljournal.xml',
'views/report_overdue.xml',
'views/report_generaljournal.xml',
'views/report_journal.xml',
'views/report_salepurchasejournal.xml',
'views/report_partnerbalance.xml',
'views/report_agedpartnerbalance.xml',
'views/report_partnerledger.xml',
'views/report_partnerledgerother.xml',
'views/report_financial.xml',
'views/report_generalledger.xml',
'project/views/report_analyticbalance.xml',
'project/views/report_analyticjournal.xml',
'project/views/report_analyticcostledgerquantity.xml',
'project/views/report_analyticcostledger.xml',
'project/views/report_invertedanalyticbalance.xml',
'views/account.xml',
],
'qweb' : [
"static/src/xml/account_move_reconciliation.xml",
"static/src/xml/account_move_line_quickadd.xml",
"static/src/xml/account_bank_statement_reconciliation.xml",
],
'demo': [
'demo/account_demo.xml',
'project/project_demo.xml',
'project/analytic_account_demo.xml',
'demo/account_minimal.xml',
'demo/account_invoice_demo.xml',
'demo/account_bank_statement.xml',
'account_unit_test.xml',
],
'test': [
'test/account_test_users.yml',
'test/account_customer_invoice.yml',
'test/account_supplier_invoice.yml',
'test/account_change_currency.yml',
'test/chart_of_account.yml',
'test/account_period_close.yml',
'test/account_use_model.yml',
'test/account_validate_account_move.yml',
'test/test_edi_invoice.yml',
'test/account_report.yml',
'test/account_fiscalyear_close.yml', #last test, as it will definitively close the demo fiscalyear
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dllsf/odootest | openerp/addons/test_new_api/tests/test_new_fields.py | 51 | 15560 | #
# test cases for new-style fields
#
from datetime import date, datetime
from collections import defaultdict
from openerp.tests import common
from openerp.exceptions import except_orm
class TestNewFields(common.TransactionCase):
def test_00_basics(self):
""" test accessing new fields """
# find a discussion
discussion = self.env.ref('test_new_api.discussion_0')
# read field as a record attribute or as a record item
self.assertIsInstance(discussion.name, basestring)
self.assertIsInstance(discussion['name'], basestring)
self.assertEqual(discussion['name'], discussion.name)
# read it with method read()
values = discussion.read(['name'])[0]
self.assertEqual(values['name'], discussion.name)
def test_01_basic_get_assertion(self):
""" test item getter """
# field access works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
ok = record.body
# field access fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
faulty = records.body
def test_01_basic_set_assertion(self):
""" test item setter """
# field assignment works on single record
record = self.env.ref('test_new_api.message_0_0')
self.assertEqual(len(record), 1)
record.body = 'OK'
# field assignment fails on multiple records
records = self.env['test_new_api.message'].search([])
assert len(records) > 1
with self.assertRaises(except_orm):
records.body = 'Faulty'
def test_10_computed(self):
""" check definition of computed fields """
# by default function fields are not stored and readonly
field = self.env['test_new_api.message']._fields['size']
self.assertFalse(field.store)
self.assertTrue(field.readonly)
field = self.env['test_new_api.message']._fields['name']
self.assertTrue(field.store)
self.assertTrue(field.readonly)
def test_10_non_stored(self):
""" test non-stored fields """
# find messages
for message in self.env['test_new_api.message'].search([]):
# check definition of field
self.assertEqual(message.size, len(message.body or ''))
# check recomputation after record is modified
size = message.size
message.write({'body': (message.body or '') + "!!!"})
self.assertEqual(message.size, size + 3)
# special case: computed field without dependency must be computed
record = self.env['test_new_api.mixed'].create({})
self.assertTrue(record.now)
def test_11_stored(self):
""" test stored fields """
# find the demo discussion
discussion = self.env.ref('test_new_api.discussion_0')
self.assertTrue(len(discussion.messages) > 0)
# check messages
name0 = discussion.name or ""
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name0, message.author.name))
# modify discussion name, and check again messages
discussion.name = name1 = 'Talking about stuff...'
for message in discussion.messages:
self.assertEqual(message.name, "[%s] %s" % (name1, message.author.name))
# switch message from discussion, and check again
name2 = 'Another discussion'
discussion2 = discussion.copy({'name': name2})
message2 = discussion.messages[0]
message2.discussion = discussion2
for message in discussion2.messages:
self.assertEqual(message.name, "[%s] %s" % (name2, message.author.name))
def test_12_recursive(self):
""" test recursively dependent fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
cath.parent = finn.parent = gabe
abel.parent = beth.parent = cath
dean.parent = ewan.parent = finn
self.assertEqual(abel.display_name, "Gabriel / Catherine / Abel")
self.assertEqual(beth.display_name, "Gabriel / Catherine / Bethany")
self.assertEqual(cath.display_name, "Gabriel / Catherine")
self.assertEqual(dean.display_name, "Gabriel / Finnley / Dean")
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Ewan")
self.assertEqual(finn.display_name, "Gabriel / Finnley")
self.assertEqual(gabe.display_name, "Gabriel")
ewan.parent = cath
self.assertEqual(ewan.display_name, "Gabriel / Catherine / Ewan")
cath.parent = finn
self.assertEqual(ewan.display_name, "Gabriel / Finnley / Catherine / Ewan")
def test_12_cascade(self):
""" test computed field depending on computed field """
message = self.env.ref('test_new_api.message_0_0')
message.invalidate_cache()
double_size = message.double_size
self.assertEqual(double_size, message.size)
def test_13_inverse(self):
""" test inverse computation of fields """
Category = self.env['test_new_api.category']
abel = Category.create({'name': 'Abel'})
beth = Category.create({'name': 'Bethany'})
cath = Category.create({'name': 'Catherine'})
dean = Category.create({'name': 'Dean'})
ewan = Category.create({'name': 'Ewan'})
finn = Category.create({'name': 'Finnley'})
gabe = Category.create({'name': 'Gabriel'})
self.assertEqual(ewan.display_name, "Ewan")
ewan.display_name = "Abel / Bethany / Catherine / Erwan"
self.assertEqual(beth.parent, abel)
self.assertEqual(cath.parent, beth)
self.assertEqual(ewan.parent, cath)
self.assertEqual(ewan.name, "Erwan")
def test_14_search(self):
""" test search on computed fields """
discussion = self.env.ref('test_new_api.discussion_0')
# determine message sizes
sizes = set(message.size for message in discussion.messages)
# search for messages based on their size
for size in sizes:
messages0 = self.env['test_new_api.message'].search(
[('discussion', '=', discussion.id), ('size', '<=', size)])
messages1 = self.env['test_new_api.message'].browse()
for message in discussion.messages:
if message.size <= size:
messages1 += message
self.assertEqual(messages0, messages1)
def test_15_constraint(self):
""" test new-style Python constraints """
discussion = self.env.ref('test_new_api.discussion_0')
# remove oneself from discussion participants: we can no longer create
# messages in discussion
discussion.participants -= self.env.user
with self.assertRaises(Exception):
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
# put back oneself into discussion participants: now we can create
# messages in discussion
discussion.participants += self.env.user
self.env['test_new_api.message'].create({'discussion': discussion.id, 'body': 'Whatever'})
def test_20_float(self):
""" test float fields """
record = self.env['test_new_api.mixed'].create({})
# assign value, and expect rounding
record.write({'number': 2.4999999999999996})
self.assertEqual(record.number, 2.50)
# same with field setter
record.number = 2.4999999999999996
self.assertEqual(record.number, 2.50)
def test_21_date(self):
""" test date fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.date = None
self.assertFalse(record.date)
# one may assign date and datetime objects
record.date = date(2012, 05, 01)
self.assertEqual(record.date, '2012-05-01')
record.date = datetime(2012, 05, 01, 10, 45, 00)
self.assertEqual(record.date, '2012-05-01')
# one may assign dates in the default format, and it must be checked
record.date = '2012-05-01'
self.assertEqual(record.date, '2012-05-01')
with self.assertRaises(ValueError):
record.date = '12-5-1'
def test_22_selection(self):
""" test selection fields """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.lang = None
self.assertFalse(record.lang)
# one may assign a value, and it must be checked
for language in self.env['res.lang'].search([]):
record.lang = language.code
with self.assertRaises(ValueError):
record.lang = 'zz_ZZ'
def test_23_relation(self):
""" test relation fields """
demo = self.env.ref('base.user_demo')
message = self.env.ref('test_new_api.message_0_0')
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
demo_env = self.env(user=demo)
self.assertNotEqual(demo_env, self.env)
# check environment of record and related records
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
# "migrate" message into demo_env, and check again
demo_message = message.sudo(demo)
self.assertEqual(demo_message.env, demo_env)
self.assertEqual(demo_message.discussion.env, demo_env)
# assign record's parent to a record in demo_env
message.discussion = message.discussion.copy({'name': 'Copy'})
# both message and its parent field must be in self.env
self.assertEqual(message.env, self.env)
self.assertEqual(message.discussion.env, self.env)
def test_24_reference(self):
""" test reference fields. """
record = self.env['test_new_api.mixed'].create({})
# one may assign False or None
record.reference = None
self.assertFalse(record.reference)
# one may assign a user or a partner...
record.reference = self.env.user
self.assertEqual(record.reference, self.env.user)
record.reference = self.env.user.partner_id
self.assertEqual(record.reference, self.env.user.partner_id)
# ... but no record from a model that starts with 'ir.'
with self.assertRaises(ValueError):
record.reference = self.env['ir.model'].search([], limit=1)
def test_25_related(self):
""" test related fields. """
message = self.env.ref('test_new_api.message_0_0')
discussion = message.discussion
# by default related fields are not stored
field = message._fields['discussion_name']
self.assertFalse(field.store)
self.assertTrue(field.readonly)
# check value of related field
self.assertEqual(message.discussion_name, discussion.name)
# change discussion name, and check result
discussion.name = 'Foo'
self.assertEqual(message.discussion_name, 'Foo')
# change discussion name via related field, and check result
message.discussion_name = 'Bar'
self.assertEqual(discussion.name, 'Bar')
self.assertEqual(message.discussion_name, 'Bar')
# search on related field, and check result
search_on_related = self.env['test_new_api.message'].search([('discussion_name', '=', 'Bar')])
search_on_regular = self.env['test_new_api.message'].search([('discussion.name', '=', 'Bar')])
self.assertEqual(search_on_related, search_on_regular)
# check that field attributes are copied
message_field = message.fields_get(['discussion_name'])['discussion_name']
discussion_field = discussion.fields_get(['name'])['name']
self.assertEqual(message_field['help'], discussion_field['help'])
def test_26_inherited(self):
""" test inherited fields. """
# a bunch of fields are inherited from res_partner
for user in self.env['res.users'].search([]):
partner = user.partner_id
for field in ('is_company', 'name', 'email', 'country_id'):
self.assertEqual(getattr(user, field), getattr(partner, field))
self.assertEqual(user[field], partner[field])
def test_30_read(self):
""" test computed fields as returned by read(). """
discussion = self.env.ref('test_new_api.discussion_0')
for message in discussion.messages:
display_name = message.display_name
size = message.size
data = message.read(['display_name', 'size'])[0]
self.assertEqual(data['display_name'], display_name)
self.assertEqual(data['size'], size)
def test_40_new(self):
""" test new records. """
discussion = self.env.ref('test_new_api.discussion_0')
# create a new message
message = self.env['test_new_api.message'].new()
self.assertFalse(message.id)
# assign some fields; should have no side effect
message.discussion = discussion
message.body = BODY = "May the Force be with you."
self.assertEqual(message.discussion, discussion)
self.assertEqual(message.body, BODY)
self.assertNotIn(message, discussion.messages)
# check computed values of fields
user = self.env.user
self.assertEqual(message.author, user)
self.assertEqual(message.name, "[%s] %s" % (discussion.name, user.name))
self.assertEqual(message.size, len(BODY))
def test_41_defaults(self):
""" test default values. """
fields = ['discussion', 'body', 'author', 'size']
defaults = self.env['test_new_api.message'].default_get(fields)
self.assertEqual(defaults, {'author': self.env.uid, 'size': 0})
defaults = self.env['test_new_api.mixed'].default_get(['number'])
self.assertEqual(defaults, {'number': 3.14})
class TestMagicFields(common.TransactionCase):
def test_write_date(self):
record = self.env['test_new_api.discussion'].create({'name': 'Booba'})
self.assertEqual(record.create_uid, self.env.user)
self.assertEqual(record.write_uid, self.env.user)
class TestInherits(common.TransactionCase):
def test_inherits(self):
""" Check that a many2one field with delegate=True adds an entry in _inherits """
Talk = self.env['test_new_api.talk']
self.assertEqual(Talk._inherits, {'test_new_api.discussion': 'parent'})
self.assertIn('name', Talk._fields)
self.assertEqual(Talk._fields['name'].related, ('parent', 'name'))
talk = Talk.create({'name': 'Foo'})
discussion = talk.parent
self.assertTrue(discussion)
self.assertEqual(talk._name, 'test_new_api.talk')
self.assertEqual(discussion._name, 'test_new_api.discussion')
self.assertEqual(talk.name, discussion.name)
| agpl-3.0 |
alajara/servo | components/script/dom/bindings/codegen/parser/tests/test_implements.py | 264 | 5961 | # Import the WebIDL module, so we can do isinstance checks and whatnot
import WebIDL
def WebIDLTest(parser, harness):
# Basic functionality
threw = False
try:
parser.parse("""
A implements B;
interface B {
attribute long x;
};
interface A {
attribute long y;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw, "Should not have thrown on implements statement "
"before interfaces")
harness.check(len(results), 3, "We have three statements")
harness.ok(isinstance(results[1], WebIDL.IDLInterface), "B is an interface")
harness.check(len(results[1].members), 1, "B has one member")
A = results[2]
harness.ok(isinstance(A, WebIDL.IDLInterface), "A is an interface")
harness.check(len(A.members), 2, "A has two members")
harness.check(A.members[0].identifier.name, "y", "First member is 'y'")
harness.check(A.members[1].identifier.name, "x", "Second member is 'x'")
# Duplicated member names not allowed
threw = False
try:
parser.parse("""
C implements D;
interface D {
attribute long x;
};
interface C {
attribute long x;
};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on implemented interface duplicating "
"a name on base interface")
# Same, but duplicated across implemented interfaces
threw = False
try:
parser.parse("""
E implements F;
E implements G;
interface F {
attribute long x;
};
interface G {
attribute long x;
};
interface E {};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on implemented interfaces "
"duplicating each other's member names")
# Same, but duplicated across indirectly implemented interfaces
threw = False
try:
parser.parse("""
H implements I;
H implements J;
I implements K;
interface K {
attribute long x;
};
interface L {
attribute long x;
};
interface I {};
interface J : L {};
interface H {};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on indirectly implemented interfaces "
"duplicating each other's member names")
# Same, but duplicated across an implemented interface and its parent
threw = False
try:
parser.parse("""
M implements N;
interface O {
attribute long x;
};
interface N : O {
attribute long x;
};
interface M {};
""")
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown on implemented interface and its "
"ancestor duplicating member names")
# Reset the parser so we can actually find things where we expect
# them in the list
parser = parser.reset()
# Diamonds should be allowed
threw = False
try:
parser.parse("""
P implements Q;
P implements R;
Q implements S;
R implements S;
interface Q {};
interface R {};
interface S {
attribute long x;
};
interface P {};
""")
results = parser.finish()
except:
threw = True
harness.ok(not threw, "Diamond inheritance is fine")
harness.check(results[6].identifier.name, "S", "We should be looking at 'S'")
harness.check(len(results[6].members), 1, "S should have one member")
harness.check(results[6].members[0].identifier.name, "x",
"S's member should be 'x'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
callback interface TestCallbackInterface {
};
TestInterface implements TestCallbackInterface;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow callback interfaces on the right-hand side "
"of 'implements'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
callback interface TestCallbackInterface {
};
TestCallbackInterface implements TestInterface;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow callback interfaces on the left-hand side of "
"'implements'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
dictionary Dict {
};
Dict implements TestInterface;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow non-interfaces on the left-hand side "
"of 'implements'")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
dictionary Dict {
};
TestInterface implements Dict;
""")
results = parser.finish()
except:
threw = True
harness.ok(threw,
"Should not allow non-interfaces on the right-hand side "
"of 'implements'")
| mpl-2.0 |
QinerTech/QinerApps | openerp/addons/decimal_precision/decimal_precision.py | 47 | 2671 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import orm, fields
from openerp.modules.registry import RegistryManager
class decimal_precision(orm.Model):
_name = 'decimal.precision'
_columns = {
'name': fields.char('Usage', select=True, required=True),
'digits': fields.integer('Digits', required=True),
}
_defaults = {
'digits': 2,
}
_sql_constraints = [
('name_uniq', 'unique (name)', """Only one value can be defined for each given usage!"""),
]
@tools.ormcache('application')
def precision_get(self, cr, uid, application):
cr.execute('select digits from decimal_precision where name=%s', (application,))
res = cr.fetchone()
return res[0] if res else 2
def clear_cache(self, cr):
""" Deprecated, use `clear_caches` instead. """
self.clear_caches()
def create(self, cr, uid, data, context=None):
res = super(decimal_precision, self).create(cr, uid, data, context=context)
self.clear_caches()
return res
def unlink(self, cr, uid, ids, context=None):
res = super(decimal_precision, self).unlink(cr, uid, ids, context=context)
self.clear_caches()
return res
def write(self, cr, uid, ids, data, *args, **argv):
res = super(decimal_precision, self).write(cr, uid, ids, data, *args, **argv)
self.clear_caches()
return res
def get_precision(application):
def change_digit(cr):
decimal_precision = openerp.registry(cr.dbname)['decimal.precision']
res = decimal_precision.precision_get(cr, SUPERUSER_ID, application)
return (16, res)
return change_digit
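# Illustrative use of get_precision() from another addon (model and field names
# are made up): the column's precision then follows the 'Account'
# decimal.precision record instead of a hard-coded (16, 2).
#
#   import openerp.addons.decimal_precision as dp
#
#   _columns = {
#       'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
#   }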
class DecimalPrecisionFloat(orm.AbstractModel):
""" Override qweb.field.float to add a `decimal_precision` domain option
and use that instead of the column's own value if it is specified
"""
_inherit = 'ir.qweb.field.float'
def precision(self, cr, uid, field, options=None, context=None):
dp = options and options.get('decimal_precision')
if dp:
return self.pool['decimal.precision'].precision_get(
cr, uid, dp)
return super(DecimalPrecisionFloat, self).precision(
cr, uid, field, options=options, context=context)
class DecimalPrecisionTestModel(orm.Model):
_name = 'decimal.precision.test'
_columns = {
'float': fields.float(),
'float_2': fields.float(digits=(16, 2)),
'float_4': fields.float(digits=(16, 4)),
}
| gpl-3.0 |
psztorc/Truthcoin | lib-other/pylib/consensus/custommath.py | 2 | 4765 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Supporting math for the consensus mechanism.
"""
from __future__ import division
from numpy import *
from numpy.linalg import *
def WeightedMedian(data, weights):
"""Calculate a weighted median.
Args:
data (list or numpy.array): data
weights (list or numpy.array): weights
"""
data, weights = array(data).squeeze(), array(weights).squeeze()
s_data, s_weights = map(array, zip(*sorted(zip(data, weights))))
midpoint = 0.5 * sum(s_weights)
if any(weights > midpoint):
w_median = median(data[weights == max(weights)])
else:
cs_weights = cumsum(s_weights)
idx = where(cs_weights <= midpoint)[0][-1]
if cs_weights[idx] == midpoint:
w_median = mean(s_data[idx:idx+2])
else:
w_median = s_data[idx+1]
return w_median
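# A small, hand-checked example of WeightedMedian (values are not from the
# original test suite): the cumulative weight crosses the midpoint at 7.
def _example_weighted_median():
    data = [1, 2, 4, 7, 10]
    weights = [1/3., 1/3., 1/3., 1., 1.]
    return WeightedMedian(data, weights)  # -> 7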
def Rescale(UnscaledMatrix, Scales):
"""Forces a matrix of raw (user-supplied) information
(for example, # of House Seats, or DJIA) to conform to
svd-appropriate range.
Practically, this is done by subtracting min and dividing by
scaled-range (which itself is max-min).
"""
# Calulate multiplicative factors
InvSpan = []
for scale in Scales:
InvSpan.append(1 / float(scale["max"] - scale["min"]))
# Recenter
OutMatrix = ma.copy(UnscaledMatrix)
cols = UnscaledMatrix.shape[1]
for i in range(cols):
OutMatrix[:,i] -= Scales[i]["min"]
# Rescale
NaIndex = isnan(OutMatrix)
OutMatrix[NaIndex] = 0
OutMatrix = dot(OutMatrix, diag(InvSpan))
OutMatrix[NaIndex] = nan
return OutMatrix
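# A minimal sketch of Rescale on made-up data: a binary column and a column
# holding a count out of 435, both brought into the 0..1 range.
def _example_rescale():
    raw = ma.masked_array([[1.0, 435.0],
                           [0.0, 10.0]])
    scales = [{"min": 0, "max": 1}, {"min": 0, "max": 435}]
    return Rescale(raw, scales)  # second column becomes [1.0, ~0.023]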
def MeanNa(Vec):
"""Takes masked array, replaces missing values with array mean."""
MM = mean(Vec)
Vec[where(Vec.mask)] = MM
return(Vec)
def GetWeight(Vec, AddMean=0):
"""Takes an array (vector in practice), and returns proportional distance from zero."""
New = abs(Vec) #Absolute Value
if AddMean == 1: #Add the mean to each element of the vector
New = New + mean(New)
if sum(New) == 0: #Catch an error here
New = New + 1
New = New/sum(New) #Normalize
return(New)
def Catch(X,Tolerance=0):
"""Forces continuous values into bins at 0, .5, and 1"""
if X < (.5-(Tolerance/2)):
return(0)
elif X > (.5+(Tolerance/2)):
return(1)
else:
return(.5)
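# For example, with the default Tolerance of 0: Catch(.4) -> 0, Catch(.5) -> .5,
# Catch(.6) -> 1; with Tolerance=.1, a value of .52 still lands in the .5 bin.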
def Influence(Weight):
"""Takes a normalized Vector (one that sums to 1), and computes relative strength of the indicators."""
N = len(Weight)
Expected = [[1/N]]*N
Out = []
for i in range(1, N):
Out.append(Weight[i]/Expected[i])
return(Out)
def ReWeight(Vec):
"""Get the relative influence of numbers, treat NaN as influence-less."""
Out = Vec
Exclude = isnan(Vec)
Out[Exclude] = 0 #set missing to 0
Out = Out / sum(Out) #normalize
return(Out)
def ReverseMatrix(Mat): #technically an array now, sorry about the terminology confusion
return( (Mat-1) * -1 )
def DemocracyCoin(Mat):
"""For testing, easier to assume uniform coin distribution."""
# print("NOTE: No coin distribution given, assuming democracy [one row, one vote].")
Rep = GetWeight( array([[1]]*len(Mat) )) #Uniform weights if none were provided.
return( Rep )
def WeightedCov(Mat,Rep=-1):
"""Takes 1] a masked array, and 2] an [n x 1] dimentional array of weights, and computes the weighted covariance
matrix and center of a given array.
Taken from http://stats.stackexchange.com/questions/61225/correct-equation-for-weighted-unbiased-sample-covariance"""
if type(Rep) is int:
Rep = DemocracyCoin(Mat)
Coins = ma.copy(Rep)
for i in range(len(Rep)):
Coins[i] = (int( (Rep[i] * 1000000)[0] ))
Mean = ma.average(Mat, axis=0, weights=hstack(Coins)) # Computing the weighted sample mean (fast, efficient and precise)
XM = matrix( Mat-Mean ) # xm = X diff to mean
sigma2 = matrix( 1/(sum(Coins)-1) * ma.multiply(XM, Coins).T.dot(XM) ); # Compute the unbiased weighted sample covariance
return( {'Cov':array(sigma2), 'Center':array(XM) } )
def WeightedPrinComp(Mat,Rep=-1):
"""Takes a matrix and row-weights and manually computes the statistical procedure known as Principal Components Analysis (PCA)
This version of the procedure is so basic, that it can also be thought of as merely a singular-value decomposition on a weighted covariance matrix."""
wCVM = WeightedCov(Mat,Rep)
SVD = svd(wCVM['Cov'])
L = SVD[0].T[0] #First loading
S = dot(wCVM['Center'],SVD[0]).T[0] #First Score
return(L,S)
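# A minimal end-to-end sketch (toy ballot matrix; uniform reputation is assumed
# via DemocracyCoin when no Rep vector is passed): returns the first loading
# over the two columns and the first score for each of the three rows.
def _example_weighted_princomp():
    votes = ma.masked_array([[1.0, 1.0],
                             [1.0, 0.0],
                             [0.0, 0.0]])
    loadings, scores = WeightedPrinComp(votes)
    return loadings, scores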
if __name__ == "__main__":
pass
| mit |
felipenaselva/repo.felipe | plugin.video.uwc/tubepornclassic.py | 1 | 3359 | '''
Ultimate Whitecream
Copyright (C) 2015 mortael
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, re
import xbmc, xbmcplugin, xbmcgui, xbmcaddon
import utils
progress = utils.progress
def Main():
utils.addDir('[COLOR hotpink]Categories[/COLOR]','http://www.tubepornclassic.com/categories/', 363, '', '')
utils.addDir('[COLOR hotpink]Top Rated[/COLOR]','http://www.tubepornclassic.com/top-rated/', 361, '', '')
utils.addDir('[COLOR hotpink]Most Viewed[/COLOR]','http://www.tubepornclassic.com/most-popular/', 361, '', '')
utils.addDir('[COLOR hotpink]Search[/COLOR]','http://www.tubepornclassic.com/search/', 364, '', '')
List('http://www.tubepornclassic.com/latest-updates/')
xbmcplugin.endOfDirectory(utils.addon_handle)
def List(url):
listhtml = utils.getHtml(url, '')
match = re.compile('<a href="([^"]+)" title="([^"]+)".*?original="([^"]+)".*?duration">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(listhtml)
for videopage, name, img, duration in match:
name = utils.cleantext(name)
name = name + " [COLOR deeppink]" + duration + "[/COLOR]"
utils.addDownLink(name, videopage, 362, img, '')
try:
nextp = re.compile('<a href="([^"]+)"[^>]+>Next', re.DOTALL | re.IGNORECASE).findall(listhtml)
utils.addDir('Next Page', 'http://www.tubepornclassic.com/' + nextp[0], 361,'')
except: pass
xbmcplugin.endOfDirectory(utils.addon_handle)
def Search(url, keyword=None):
searchUrl = url
if not keyword:
utils.searchDir(url, 364)
else:
title = keyword.replace(' ','%20')
searchUrl = searchUrl + title + "/"
print "Searching URL: " + searchUrl
List(searchUrl)
def Cat(url):
listhtml = utils.getHtml(url, '')
match = re.compile('<a class="item" href="([^"]+)" title="([^"]+)".*?data-original="([^"]+)".*?videos">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(listhtml)
for catpage, name, img, videos in match:
name = utils.cleantext(name) + " [COLOR deeppink]" + videos + "[/COLOR]"
utils.addDir(name, catpage, 361, img, '')
xbmcplugin.endOfDirectory(utils.addon_handle)
def Playvid(url, name, download=None):
videopage = utils.getHtml(url, '')
videourl = re.compile("video_url: '([^']+)", re.DOTALL | re.IGNORECASE).findall(videopage)[0]
videourl = utils.getVideoLink(videourl, url)
if download == 1:
utils.downloadVideo(videourl, name)
else:
iconimage = xbmc.getInfoImage("ListItem.Thumb")
listitem = xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
listitem.setInfo('video', {'Title': name, 'Genre': 'Porn'})
xbmc.Player().play(videourl, listitem) | gpl-2.0 |
ebsaral/django-rest-framework | rest_framework/permissions.py | 71 | 6444 | """
Provides a set of pluggable permission policies.
"""
from __future__ import unicode_literals
from django.http import Http404
from rest_framework.compat import get_model_name
SAFE_METHODS = ('GET', 'HEAD', 'OPTIONS')
class BasePermission(object):
"""
A base class from which all permission classes should inherit.
"""
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
def has_object_permission(self, request, view, obj):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
class AllowAny(BasePermission):
"""
Allow any access.
This isn't strictly required, since you could use an empty
permission_classes list, but it's useful because it makes the intention
more explicit.
"""
def has_permission(self, request, view):
return True
class IsAuthenticated(BasePermission):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_authenticated()
class IsAdminUser(BasePermission):
"""
Allows access only to admin users.
"""
def has_permission(self, request, view):
return request.user and request.user.is_staff
class IsAuthenticatedOrReadOnly(BasePermission):
"""
The request is authenticated as a user, or is a read-only request.
"""
def has_permission(self, request, view):
return (
request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated()
)
class DjangoModelPermissions(BasePermission):
"""
The request is authenticated using `django.contrib.auth` permissions.
See: https://docs.djangoproject.com/en/dev/topics/auth/#permissions
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the model.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
# Map methods into required permission codes.
# Override this if you need to also provide 'view' permissions,
# or if you want to provide custom permission codes.
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
authenticated_users_only = True
def get_required_permissions(self, method, model_cls):
"""
Given a model and an HTTP method, return the list of permission
codes that the user is required to have.
"""
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_permission(self, request, view):
# Workaround to ensure DjangoModelPermissions are not applied
# to the root view when using DefaultRouter.
if getattr(view, '_ignore_model_permissions', False):
return True
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoModelPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
perms = self.get_required_permissions(request.method, queryset.model)
return (
request.user and
(request.user.is_authenticated() or not self.authenticated_users_only) and
request.user.has_perms(perms)
)
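class DjangoModelPermissionsWithView(DjangoModelPermissions):
    """
    A minimal sketch of the override mentioned in the docstring above: a
    subclass whose perms_map also requires a custom 'view' permission for read
    requests. The 'view_%(model_name)s' codename is an assumption -- it must be
    defined as a permission in the project for this class to grant read access.
    """
    perms_map = {
        'GET': ['%(app_label)s.view_%(model_name)s'],
        'OPTIONS': [],
        'HEAD': ['%(app_label)s.view_%(model_name)s'],
        'POST': ['%(app_label)s.add_%(model_name)s'],
        'PUT': ['%(app_label)s.change_%(model_name)s'],
        'PATCH': ['%(app_label)s.change_%(model_name)s'],
        'DELETE': ['%(app_label)s.delete_%(model_name)s'],
    }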
class DjangoModelPermissionsOrAnonReadOnly(DjangoModelPermissions):
"""
Similar to DjangoModelPermissions, except that anonymous users are
allowed read-only access.
"""
authenticated_users_only = False
class DjangoObjectPermissions(DjangoModelPermissions):
"""
The request is authenticated using Django's object-level permissions.
It requires an object-permissions-enabled backend, such as Django Guardian.
It ensures that the user is authenticated, and has the appropriate
`add`/`change`/`delete` permissions on the object using .has_perms.
This permission can only be applied against view classes that
provide a `.queryset` attribute.
"""
perms_map = {
'GET': [],
'OPTIONS': [],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
'PUT': ['%(app_label)s.change_%(model_name)s'],
'PATCH': ['%(app_label)s.change_%(model_name)s'],
'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
def get_required_object_permissions(self, method, model_cls):
kwargs = {
'app_label': model_cls._meta.app_label,
'model_name': get_model_name(model_cls)
}
return [perm % kwargs for perm in self.perms_map[method]]
def has_object_permission(self, request, view, obj):
try:
queryset = view.get_queryset()
except AttributeError:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoObjectPermissions on a view that '
'does not have `.queryset` property or overrides the '
'`.get_queryset()` method.')
model_cls = queryset.model
user = request.user
perms = self.get_required_object_permissions(request.method, model_cls)
if not user.has_perms(perms, obj):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404
read_perms = self.get_required_object_permissions('GET', model_cls)
if not user.has_perms(read_perms, obj):
raise Http404
# Has read permissions.
return False
return True
| bsd-2-clause |
jcoady9/beets | test/testall.py | 1 | 1320 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import os
import re
import sys
from test._common import unittest
pkgpath = os.path.dirname(__file__) or '.'
sys.path.append(pkgpath)
os.chdir(pkgpath)
def suite():
s = unittest.TestSuite()
# Get the suite() of every module in this directory beginning with
# "test_".
for fname in os.listdir(pkgpath):
match = re.match(r'(test_\S+)\.py$', fname)
if match:
modname = match.group(1)
s.addTest(__import__(modname).suite())
return s
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| mit |
kkdd/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_with.py | 53 | 23715 | #!/usr/bin/env python
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import GeneratorContextManager, contextmanager
from test.test_support import run_unittest
class MockContextManager(GeneratorContextManager):
def __init__(self, gen):
GeneratorContextManager.__init__(self, gen)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func(*args, **kwds))
return helper
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0], ex[1], ex[2]
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args[0], RuntimeError)
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise iter([]).next()
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
class cm (object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __nonzero__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1//0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(targets.keys(), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as targets.values()[0][1]:
self.assertEqual(targets.keys(), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = targets.keys()
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (targets.values()[0][2], targets.values()[0][1], targets.values()[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1/0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1/0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase)
if __name__ == '__main__':
test_main()
| apache-2.0 |
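As a quick reference for the invariants those tests check, here is a minimal, self-contained context manager showing the enter/body/exit ordering; the `Recorder` class is an illustration only and does not appear in the test file.
# Minimal illustration (not from the test file) of the protocol under test.
class Recorder(object):
    def __init__(self):
        self.events = []
    def __enter__(self):
        self.events.append('enter')
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.events.append('exit')
        return False  # returning False lets any exception propagate
with Recorder() as rec:
    rec.events.append('body')
assert rec.events == ['enter', 'body', 'exit']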
BaconPancakes/valor | lib/youtube_dl/extractor/mitele.py | 9 | 8002 | # coding: utf-8
from __future__ import unicode_literals
import uuid
from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..compat import (
compat_str,
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
int_or_none,
extract_attributes,
determine_ext,
smuggle_url,
parse_duration,
)
class MiTeleBaseIE(InfoExtractor):
def _get_player_info(self, url, webpage):
player_data = extract_attributes(self._search_regex(
r'(?s)(<ms-video-player.+?</ms-video-player>)',
webpage, 'ms video player'))
video_id = player_data['data-media-id']
if player_data.get('data-cms-id') == 'ooyala':
return self.url_result(
'ooyala:%s' % video_id, ie=OoyalaIE.ie_key(), video_id=video_id)
config_url = compat_urlparse.urljoin(url, player_data['data-config'])
config = self._download_json(
config_url, video_id, 'Downloading config JSON')
mmc_url = config['services']['mmc']
duration = None
formats = []
for m_url in (mmc_url, mmc_url.replace('/flash.json', '/html5.json')):
mmc = self._download_json(
m_url, video_id, 'Downloading mmc JSON')
if not duration:
duration = int_or_none(mmc.get('duration'))
for location in mmc['locations']:
gat = self._proto_relative_url(location.get('gat'), 'http:')
bas = location.get('bas')
loc = location.get('loc')
ogn = location.get('ogn')
if None in (gat, bas, loc, ogn):
continue
token_data = {
'bas': bas,
'icd': loc,
'ogn': ogn,
'sta': '0',
}
media = self._download_json(
'%s/?%s' % (gat, compat_urllib_parse_urlencode(token_data)),
video_id, 'Downloading %s JSON' % location['loc'])
file_ = media.get('file')
if not file_:
continue
ext = determine_ext(file_)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
file_ + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
file_, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'thumbnail': player_data.get('data-poster') or config.get('poster', {}).get('imageUrl'),
'duration': duration,
}
class MiTeleIE(InfoExtractor):
IE_DESC = 'mitele.es'
_VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player'
_TESTS = [{
'url': 'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
'info_dict': {
'id': '57b0dfb9c715da65618b4afa',
'ext': 'mp4',
'title': 'Tor, la web invisible',
'description': 'md5:3b6fce7eaa41b2d97358726378d9369f',
'series': 'Diario de',
'season': 'La redacción',
'season_number': 14,
'season_id': 'diario_de_t14_11981',
'episode': 'Programa 144',
'episode_number': 3,
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'duration': 2913,
},
'add_ie': ['Ooyala'],
}, {
# no explicit title
'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',
'info_dict': {
'id': '57b0de3dc915da14058b4876',
'ext': 'mp4',
'title': 'Cuarto Milenio Temporada 6 Programa 226',
'description': 'md5:5ff132013f0cd968ffbf1f5f3538a65f',
'series': 'Cuarto Milenio',
'season': 'Temporada 6',
'season_number': 6,
'season_id': 'cuarto_milenio_t06_12715',
'episode': 'Programa 226',
'episode_number': 24,
'thumbnail': r're:(?i)^https?://.*\.jpg$',
'duration': 7313,
},
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
}, {
'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
gigya_url = self._search_regex(
r'<gigya-api>[^>]*</gigya-api>[^>]*<script\s+src="([^"]*)">[^>]*</script>',
webpage, 'gigya', default=None)
gigya_sc = self._download_webpage(
compat_urlparse.urljoin('http://www.mitele.es/', gigya_url),
video_id, 'Downloading gigya script')
# Get an appKey/uuid pair for obtaining the session key
appKey_var = self._search_regex(
r'value\s*\(\s*["\']appGridApplicationKey["\']\s*,\s*([0-9a-f]+)',
gigya_sc, 'appKey variable')
appKey = self._search_regex(
r'var\s+%s\s*=\s*["\']([0-9a-f]+)' % appKey_var, gigya_sc, 'appKey')
session_json = self._download_json(
'https://appgrid-api.cloud.accedo.tv/session',
video_id, 'Downloading session keys', query={
'appKey': appKey,
'uuid': compat_str(uuid.uuid4()),
})
paths = self._download_json(
'https://appgrid-api.cloud.accedo.tv/metadata/general_configuration,%20web_configuration',
video_id, 'Downloading paths JSON',
query={'sessionKey': compat_str(session_json['sessionKey'])})
ooyala_s = paths['general_configuration']['api_configuration']['ooyala_search']
source = self._download_json(
'http://%s%s%s/docs/%s' % (
ooyala_s['base_url'], ooyala_s['full_path'],
ooyala_s['provider_id'], video_id),
video_id, 'Downloading data JSON', query={
'include_titles': 'Series,Season',
'product_name': 'test',
'format': 'full',
})['hits']['hits'][0]['_source']
embedCode = source['offers'][0]['embed_codes'][0]
titles = source['localizable_titles'][0]
title = titles.get('title_medium') or titles['title_long']
description = titles.get('summary_long') or titles.get('summary_medium')
def get(key1, key2):
value1 = source.get(key1)
if not value1 or not isinstance(value1, list):
return
if not isinstance(value1[0], dict):
return
return value1[0].get(key2)
series = get('localizable_titles_series', 'title_medium')
season = get('localizable_titles_season', 'title_medium')
season_number = int_or_none(source.get('season_number'))
season_id = source.get('season_id')
episode = titles.get('title_sort_name')
episode_number = int_or_none(source.get('episode_number'))
duration = parse_duration(get('videos', 'duration'))
return {
'_type': 'url_transparent',
# for some reason only HLS is supported
'url': smuggle_url('ooyala:' + embedCode, {'supportedformats': 'm3u8,dash'}),
'id': video_id,
'title': title,
'description': description,
'series': series,
'season': season,
'season_number': season_number,
'season_id': season_id,
'episode': episode,
'episode_number': episode_number,
'duration': duration,
'thumbnail': get('images', 'url'),
}
| gpl-3.0 |
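The extractor above is not normally instantiated by hand; youtube-dl matches the URL against `_VALID_URL` and dispatches to it through the `YoutubeDL` front end. A hedged sketch of that flow, reusing the URL from the `_TESTS` entry above (the option values are assumptions):
# Hypothetical driver for the extractor above via youtube-dl's public API.
import youtube_dl
ydl = youtube_dl.YoutubeDL({'quiet': True, 'skip_download': True})
info = ydl.extract_info(
    'http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player',
    download=False)
print(info.get('title'))  # expected 'Tor, la web invisible' per the test metadata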
LaboratoireMecaniqueLille/Ximea | old/ximea_display_REC.py | 1 | 8856 | import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import rcParams
import SimpleITK as sitk
from multiprocessing import Process, Pipe, Value
from matplotlib.widgets import Slider, Button
rcParams['font.family'] = 'serif'
#ps aux | grep python # KILL python process ...
#kill -9 insert_here_the_python_thread_number # ... try it if ximea won't open again.
plt.close('all')
############################## Parameters
nbr_images=400 # enter here the number of images you need to save.
save_directory="/home/corentin/Bureau/ximea/" # path to the save directory. BE AWARE that this script will erase previous images without regrets or remorse.
exposure= 10000 # exposure time, in microseconds
gain=2
height=1024 # reducing this one allows one to increase the FPS
width=1024 # doesn't work for this one
data_format=6 #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
external_trigger= False #set to True if you trigger with an external source (arduino...). BE AWARE there is a 10s waiting time for the ximea, meaning if you wait more than 10 sec to trigger, ximea will return an error and stop working.
set_FPS=False # set to True if you want to manually set the frame rate. It has 0.1 FPS precision @88FPS. If you need more precision, please use an external trigger with arduino.
FPS=50 # set here the frame rate you need. This parameter will only work if set_FPS=True.
numdevice = 0 # Set the number of the camera (if several cameras are plugged in)
##############################
rec_send , rec_recv = Pipe()
anim_send, anim_recv = Pipe()
rec_signal=Value('i',0)
plot_signal=Value('i',0)
#cap = cv2.VideoCapture(cv2.CAP_XIAPI) # open the ximea device
#cap = cv2.VideoCapture(cv2.CAP_XIAPI + numdevice) # open the ximea device Ximea devices start at 1100. 1100 => device 0, 1101 => device 1
#if external_trigger==True: # this condition activate the trigger mode
#cap.set(cv2.CAP_PROP_XI_TRG_SOURCE,1)
#cap.set(cv2.CAP_PROP_XI_GPI_SELECTOR,1)
#cap.set(cv2.CAP_PROP_XI_GPI_MODE,1)
#cap.set(cv2.CAP_PROP_XI_DATA_FORMAT,data_format) #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
#if data_format ==1 or data_format==6: #increase the FPS in 10 bits
#cap.set(cv2.CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH,10)
#cap.set(cv2.CAP_PROP_XI_DATA_PACKING,1)
#cap.set(cv2.CAP_PROP_XI_AEAG,0)#auto gain auto exposure
#cap.set(cv2.CAP_PROP_FRAME_WIDTH,width); # doesn't work for this one
##cap.set(cv2.CAP_PROP_XI_OFFSET_X,640);
#cap.set(cv2.CAP_PROP_FRAME_HEIGHT,height); # reducing this one allows one to increase the FPS
##cap.set(cv2.CAP_PROP_XI_DOWNSAMPLING,0) # activate this one if you need to downsample your images, i.e if you need a very high FPS and other options are not enough
##print cap.get(cv2.CAP_PROP_FRAME_WIDTH)
##print cap.get(cv2.CAP_PROP_FRAME_HEIGHT);
#cap.set(cv2.CAP_PROP_EXPOSURE,exposure) # setting up exposure
#cap.set(cv2.CAP_PROP_GAIN,gain) #setting up gain
#ret, frame = cap.read() # read a frame
### initialising the histogram
#if cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==0 or cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==5:
#x=np.arange(0,256,4)
#if cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==1 or cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==6:
#x=np.arange(0,1024,4)
#hist=np.ones(np.shape(x))
### initialising graph and axes
rat = 0.7
Width=7
Height=7.
rat = 0.7
Width=7
Height=7.
fig=plt.figure(figsize=(Height, Width))
frame=np.zeros((height,width))
axim = fig.add_axes([0.15, 0.135, rat, rat*(Height/Width)]) # Image frame
im = axim.imshow(frame,cmap=plt.cm.gray,interpolation='nearest') # display the first image
RECax = plt.axes([0.01, (0.15+rat)/2, 0.05, 0.05]) # define size and position
button = Button(RECax, 'REC', color='red', hovercolor='0.975') # define button
#fig=plt.figure(figsize=(Height, Width))
#ax=fig.add_subplot(111)
##axim = fig.add_axes([0.15, 0.135, rat, rat*(Height/Width)]) # Image frame
##cax = fig.add_axes([0.17+rat, 0.135, 0.02, rat*(Height/Width)]) # colorbar frame
##axhist=fig.add_axes([0.15,(0.17+rat),rat,0.1]) # histogram frame
##axhist.set_xlim([0,max(x)]) #set histogram limit in x...
##axhist.set_ylim([0,1]) # ... and y
#frame=np.zeros((height,width))
#im = ax.imshow(frame,cmap=plt.cm.gray,interpolation='nearest') # display the first image
##li,= axhist.plot(x,hist) #plot first histogram
##cb = fig.colorbar(im, cax=cax) #plot colorbar
##cax.axis('off')
#fig.canvas.draw()
#plt.show(block=False)
### define cursors here
#axcolor = 'lightgoldenrodyellow'
#axExp = plt.axes([0.15, 0.02,rat, 0.03], axisbg=axcolor) # define position and size
#sExp = Slider(axExp, 'Exposure', 200, 50000, valinit=exposure) #Exposition max = 1000000 # define slider with previous position and size
#axGain= plt.axes([0.15, 0.07,rat, 0.03], axisbg=axcolor)
#sGain = Slider(axGain, 'Gain', -1, 6, valinit=gain)
#def update(val): # this function updates the exposure and gain values
#cap.set(cv2.CAP_PROP_EXPOSURE,sExp.val)
#cap.set(cv2.CAP_PROP_GAIN,sGain.val)
#fig.canvas.draw_idle()
#sExp.on_changed(update) # call for update everytime the cursors change
#sGain.on_changed(update)
### define buttons here
#RECax = plt.axes([0.01, (0.15+rat)/2, 0.05, 0.05]) # define size and position
#button = Button(RECax, 'REC', color='red', hovercolor='0.975') # define button
def REC(): # when called, read "nbr_images" and save them as .tiff in save_directory
while True:
while rec_signal.value!=1:
indent=True
t0=time.time()
last_t=0
i=0
while(i<nbr_images):
if set_FPS==True and last_t!=0: #This loop is used to set the FPS
while (time.time()-last_t) < 1./FPS:
indent=True
last_t=time.time()
frame = rec_recv.recv()
image=sitk.GetImageFromArray(frame)
sitk.WriteImage(image,save_directory+"img_%.5d.tiff" %i) ### works fast in 8 or 16 bit, always use sitk.
i+=1
rec_signal.value=0
t=time.time()-t0
print "FPS = %s"%(nbr_images/t)
#def REC_one(event): # when called, read 1 image and save it as .tiff in save_directory with a timestamp, so the next REC will not erase the previous one
#ret, frame = cap.read()
#image=sitk.GetImageFromArray(frame)
#sitk.WriteImage(image,save_directory+"img_%.5d.tiff" %(time.time())) ### works fast in 8 or 16 bit, always use sitk.
def REC2(event):
rec_signal.value=1
#button.on_clicked(REC2) # on click, call the REC function
### Main
def function(i):
print "function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
plot_signal.value=1
frame=anim_recv.recv() # read a frame
plot_signal.value=0
print "received!"
print frame[0]
im.set_data(frame)
return axim
def get_frame():
cap = cv2.VideoCapture(cv2.CAP_XIAPI + numdevice) # open the ximea device Ximea devices start at 1100. 1100 => device 0, 1101 => device 1
if external_trigger==True: # this condition activate the trigger mode
cap.set(cv2.CAP_PROP_XI_TRG_SOURCE,1)
cap.set(cv2.CAP_PROP_XI_GPI_SELECTOR,1)
cap.set(cv2.CAP_PROP_XI_GPI_MODE,1)
cap.set(cv2.CAP_PROP_XI_DATA_FORMAT,data_format) #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
if data_format ==1 or data_format==6: #increase the FPS in 10 bits
cap.set(cv2.CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH,10)
cap.set(cv2.CAP_PROP_XI_DATA_PACKING,1)
cap.set(cv2.CAP_PROP_XI_AEAG,0)#auto gain auto exposure
cap.set(cv2.CAP_PROP_FRAME_WIDTH,width); # doesn't work for this one
#cap.set(cv2.CAP_PROP_XI_OFFSET_X,640);
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,height); # reducing this one allows one to increase the FPS
#cap.set(cv2.CAP_PROP_XI_DOWNSAMPLING,0) # activate this one if you need to downsample your images, i.e if you need a very high FPS and other options are not enough
#print cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#print cap.get(cv2.CAP_PROP_FRAME_HEIGHT);
cap.set(cv2.CAP_PROP_EXPOSURE,exposure) # setting up exposure
cap.set(cv2.CAP_PROP_GAIN,gain) #setting up gain
while True:
ret, frame=cap.read()
print "this is Patrick"
print frame[0]
print plot_signal.value
if plot_signal.value==1:
anim_send.send(frame)
print "sended"
print rec_signal.value
if rec_signal.value==1:
rec_send.send(frame)
Get_frame=Process(target=get_frame,args=())
time.sleep(1)
#Rec=Process(target=REC,args=())
#Ani=Process(target=ani,args=())
Get_frame.start()
time.sleep(1)
#Ani.start()
#Rec.start()
#ani = animation.FuncAnimation(fig, anim, interval=20, frames=20, blit=False) # This function call the anim function to update averything in the figure.
#plt.show()
Get_frame.join()
time.sleep(1)
#Ani.join()
ani = animation.FuncAnimation(fig, function, interval=20, frames=20, blit=False) # Calls "function" to update everything in the figure; keep a reference so the animation is not garbage collected.
plt.show()
#Rec.join()
| gpl-2.0 |
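The script above hands frames from the capture process to the display and recording loops through a multiprocessing `Pipe` plus shared integer flags; below is a stripped-down, camera-free sketch of that hand-off pattern (all names and the fake frame source are made up for illustration).
# Stand-alone sketch of the Pipe + shared-Value hand-off; random arrays stand
# in for camera frames, and every name here is hypothetical.
import time
import numpy as np
from multiprocessing import Process, Pipe, Value
frame_send, frame_recv = Pipe()
want_frame = Value('i', 0)
def producer(conn, flag):
    while True:
        frame = np.random.rand(4, 4)  # stand-in for cap.read()
        if flag.value == 1:           # only send when the consumer asks
            conn.send(frame)
            flag.value = 0
        time.sleep(0.01)
if __name__ == '__main__':
    p = Process(target=producer, args=(frame_send, want_frame))
    p.daemon = True
    p.start()
    want_frame.value = 1              # request one frame...
    print(frame_recv.recv().shape)    # ...and block until it arrives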
jeasoft/odoo | addons/purchase_requisition/__openerp__.py | 260 | 2424 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Requisitions',
'version': '0.1',
'author': 'OpenERP SA',
'category': 'Purchase Management',
'website': 'https://www.odoo.com/page/purchase',
'description': """
This module allows you to manage your Purchase Requisition.
===========================================================
When a purchase order is created, you now have the opportunity to save the
related requisition. This new object regroups your purchase orders and allows
you to easily keep track of and order them.
""",
'depends' : ['purchase'],
'demo': ['purchase_requisition_demo.xml'],
'data': ['views/purchase_requisition.xml',
'security/purchase_tender.xml',
'wizard/purchase_requisition_partner_view.xml',
'wizard/bid_line_qty_view.xml',
'purchase_requisition_data.xml',
'purchase_requisition_view.xml',
'purchase_requisition_report.xml',
'purchase_requisition_workflow.xml',
'security/ir.model.access.csv','purchase_requisition_sequence.xml',
'views/report_purchaserequisition.xml',
],
'auto_install': False,
'test': [
'test/purchase_requisition_users.yml',
'test/purchase_requisition_demo.yml',
'test/cancel_purchase_requisition.yml',
'test/purchase_requisition.yml',
],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chenc10/Spark-PAF | dist/ec2/lib/boto-2.34.0/boto/vpc/networkacl.py | 151 | 4976 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Network ACL
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.resultset import ResultSet
class Icmp(object):
"""
Defines the ICMP code and type.
"""
def __init__(self, connection=None):
self.code = None
self.type = None
def __repr__(self):
return 'Icmp::code:%s, type:%s)' % ( self.code, self.type)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'code':
self.code = value
elif name == 'type':
self.type = value
class NetworkAcl(TaggedEC2Object):
def __init__(self, connection=None):
super(NetworkAcl, self).__init__(connection)
self.id = None
self.vpc_id = None
self.network_acl_entries = []
self.associations = []
def __repr__(self):
return 'NetworkAcl:%s' % self.id
def startElement(self, name, attrs, connection):
result = super(NetworkAcl, self).startElement(name, attrs, connection)
if result is not None:
# Parent found an interested element, just return it
return result
if name == 'entrySet':
self.network_acl_entries = ResultSet([('item', NetworkAclEntry)])
return self.network_acl_entries
elif name == 'associationSet':
self.associations = ResultSet([('item', NetworkAclAssociation)])
return self.associations
else:
return None
def endElement(self, name, value, connection):
if name == 'networkAclId':
self.id = value
elif name == 'vpcId':
self.vpc_id = value
else:
setattr(self, name, value)
class NetworkAclEntry(object):
def __init__(self, connection=None):
self.rule_number = None
self.protocol = None
self.rule_action = None
self.egress = None
self.cidr_block = None
self.port_range = PortRange()
self.icmp = Icmp()
def __repr__(self):
return 'Acl:%s' % self.rule_number
def startElement(self, name, attrs, connection):
if name == 'portRange':
return self.port_range
elif name == 'icmpTypeCode':
return self.icmp
else:
return None
def endElement(self, name, value, connection):
if name == 'cidrBlock':
self.cidr_block = value
elif name == 'egress':
self.egress = value
elif name == 'protocol':
self.protocol = value
elif name == 'ruleAction':
self.rule_action = value
elif name == 'ruleNumber':
self.rule_number = value
class NetworkAclAssociation(object):
def __init__(self, connection=None):
self.id = None
self.subnet_id = None
self.network_acl_id = None
def __repr__(self):
return 'NetworkAclAssociation:%s' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'networkAclAssociationId':
self.id = value
elif name == 'networkAclId':
self.network_acl_id = value
elif name == 'subnetId':
self.subnet_id = value
class PortRange(object):
"""
Defines the port range for the ACL entry when the protocol is TCP or UDP.
"""
def __init__(self, connection=None):
self.from_port = None
self.to_port = None
def __repr__(self):
return 'PortRange:(%s-%s)' % ( self.from_port, self.to_port)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'from':
self.from_port = value
elif name == 'to':
self.to_port = value
| apache-2.0 |
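These classes are normally populated by boto's XML response parser rather than constructed directly; a hedged sketch of how they are usually reached from a VPC connection (the region is an assumption and AWS credentials are expected to be configured elsewhere):
# Hypothetical listing of network ACLs through boto's VPC API.
import boto.vpc
conn = boto.vpc.connect_to_region('us-east-1')  # assumed region
for acl in conn.get_all_network_acls():
    print('%s (vpc %s)' % (acl.id, acl.vpc_id))
    for entry in acl.network_acl_entries:
        print('  rule %s %s %s' % (entry.rule_number, entry.rule_action,
                                   entry.cidr_block))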